StarRocksCnSpec defines the desired state of cn.
StarRocksCnStatus represents the status of starrocks cn.
@@ -2436,8 +2436,8 @@ string
phase
-
-MemberPhase
+
+ComponentPhase
|
@@ -2865,8 +2865,8 @@ string
phase
-
-MemberPhase
+
+ComponentPhase
|
@@ -3222,8 +3222,8 @@ string
phase
-
-MemberPhase
+
+ComponentPhase
|
@@ -3641,8 +3641,8 @@ string
phase
-
-MemberPhase
+
+ComponentPhase
|
@@ -4115,6 +4115,175 @@ The range of valid ports is 30000-32767
+
StarRocksWarehouse
+
+
+
StarRocksWarehouse defines a starrocks warehouse.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+StarRocksWarehouseSpec
+
+
+ |
+
+ Spec represents the specification of desired state of a starrocks warehouse.
+
+
+
+
+
+starRocksCluster
+
+string
+
+ |
+
+ StarRocksCluster is the name of a StarRocksCluster which the warehouse belongs to.
+ |
+
+
+
+starRocksCnSpec
+
+
+StarRocksCnSpec
+
+
+ |
+
+ StarRocksCnSpec defines the cn component configuration for starting the cn service.
+ |
+
+
+ |
+
+
+
+status
+
+
+StarRocksWarehouseStatus
+
+
+ |
+
+ Status represents the recent observed status of the starrocks warehouse.
+ |
+
+
+
+
StarRocksWarehouseSpec
+
+
+(Appears on:StarRocksWarehouse)
+
+
+
StarRocksWarehouseSpec defines the desired state of StarRocksWarehouse
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+starRocksCluster
+
+string
+
+ |
+
+ StarRocksCluster is the name of a StarRocksCluster which the warehouse belongs to.
+ |
+
+
+
+starRocksCnSpec
+
+
+StarRocksCnSpec
+
+
+ |
+
+ StarRocksCnSpec defines the cn component configuration for starting the cn service.
+ |
+
+
+
+
StarRocksWarehouseStatus
+
+
+(Appears on:StarRocksWarehouse)
+
+
+
StarRocksWarehouseStatus defines the observed state of StarRocksWarehouse.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+phase
+
+
+Phase
+
+
+ |
+
+ Phase represents the state of a warehouse. The possible values are: running, failed, pending and deleting.
+ |
+
+
+
+starRocksCnStatus
+
+
+StarRocksCnStatus
+
+
+ |
+
+ StarRocksCnStatus represents the status of the cn service. The possible states are reconciling, failed and running.
+ |
+
+
+
StorageVolume
diff --git a/go.mod b/go.mod
index 7fa7541f..b242edef 100644
--- a/go.mod
+++ b/go.mod
@@ -4,10 +4,12 @@ go 1.19
require (
github.com/davecgh/go-spew v1.1.1
+ github.com/go-sql-driver/mysql v1.7.1
github.com/onsi/ginkgo/v2 v2.8.1
github.com/onsi/gomega v1.26.0
github.com/spf13/viper v1.13.0
github.com/stretchr/testify v1.8.0
+ gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.25.0
k8s.io/apimachinery v0.25.0
k8s.io/client-go v0.25.0
@@ -83,7 +85,6 @@ require (
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.25.0 // indirect
k8s.io/component-base v0.25.0 // indirect
diff --git a/go.sum b/go.sum
index 94e13976..2633acf7 100644
--- a/go.sum
+++ b/go.sum
@@ -155,6 +155,8 @@ github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
diff --git a/helm-charts/charts/kube-starrocks/charts/operator/templates/clusterrole.yaml b/helm-charts/charts/kube-starrocks/charts/operator/templates/clusterrole.yaml
index af9b2920..92e5dc03 100644
--- a/helm-charts/charts/kube-starrocks/charts/operator/templates/clusterrole.yaml
+++ b/helm-charts/charts/kube-starrocks/charts/operator/templates/clusterrole.yaml
@@ -54,18 +54,21 @@ rules:
- starrocks.com
resources:
- starrocksclusters
+ - starrockswarehouses
verbs:
- '*'
- apiGroups:
- starrocks.com
resources:
- starrocksclusters/finalizers
+ - starrockswarehouses/finalizers
verbs:
- update
- apiGroups:
- starrocks.com
resources:
- starrocksclusters/status
+ - starrockswarehouses/status
verbs:
- get
- patch
diff --git a/helm-charts/charts/kube-starrocks/charts/starrocks/templates/beconfigmap.yaml b/helm-charts/charts/kube-starrocks/charts/starrocks/templates/beconfigmap.yaml
index bb862c90..b873bda7 100644
--- a/helm-charts/charts/kube-starrocks/charts/starrocks/templates/beconfigmap.yaml
+++ b/helm-charts/charts/kube-starrocks/charts/starrocks/templates/beconfigmap.yaml
@@ -1,4 +1,4 @@
-{{- if and .Values.starrocksBeSpec .Values.starrocksBeSpec.config }}
+{{- if and .Values.starrocksCluster.enabledBe .Values.starrocksBeSpec .Values.starrocksBeSpec.config }}
apiVersion: v1
kind: ConfigMap
metadata:
diff --git a/helm-charts/charts/kube-starrocks/charts/starrocks/templates/init-pwd/job.yaml b/helm-charts/charts/kube-starrocks/charts/starrocks/templates/init-pwd/job.yaml
index 9f1a5ddb..f932fe6e 100644
--- a/helm-charts/charts/kube-starrocks/charts/starrocks/templates/init-pwd/job.yaml
+++ b/helm-charts/charts/kube-starrocks/charts/starrocks/templates/init-pwd/job.yaml
@@ -10,7 +10,7 @@ spec:
containers:
- name: {{ template "starrockscluster.name" . }}-initpwd
image: {{ .Values.starrocksFESpec.image.repository }}:{{ .Values.starrocksFESpec.image.tag }}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
command:
- /bin/bash
args:
@@ -38,4 +38,4 @@ spec:
name: fe-initpwd-shell
restartPolicy: OnFailure
backoffLimit: 10
-{{- end }}
\ No newline at end of file
+{{- end }}
diff --git a/helm-charts/charts/kube-starrocks/charts/starrocks/templates/starrockscluster.yaml b/helm-charts/charts/kube-starrocks/charts/starrocks/templates/starrockscluster.yaml
index cc670df1..a98fdfdc 100644
--- a/helm-charts/charts/kube-starrocks/charts/starrocks/templates/starrockscluster.yaml
+++ b/helm-charts/charts/kube-starrocks/charts/starrocks/templates/starrockscluster.yaml
@@ -156,7 +156,7 @@ spec:
{{- end }}
{{- end }}
-{{- if .Values.starrocksBeSpec }}
+{{- if .Values.starrocksCluster.enabledBe }}
starRocksBeSpec:
image: "{{ .Values.starrocksBeSpec.image.repository }}:{{ .Values.starrocksBeSpec.image.tag }}"
replicas: {{ .Values.starrocksBeSpec.replicas }}
diff --git a/helm-charts/charts/kube-starrocks/charts/starrocks/values.yaml b/helm-charts/charts/kube-starrocks/charts/starrocks/values.yaml
index 84d5f81c..46fc7585 100644
--- a/helm-charts/charts/kube-starrocks/charts/starrocks/values.yaml
+++ b/helm-charts/charts/kube-starrocks/charts/starrocks/values.yaml
@@ -51,6 +51,7 @@ starrocksCluster:
# annotations for starrocks cluster.
annotations: {}
# specify the cn deploy or not.
+ enabledBe: true
enabledCn: false
# spec to deploy fe.
@@ -462,7 +463,7 @@ starrocksBeSpec:
limits:
cpu: 8
memory: 8Gi
- # specify storageclass name and request size.
+ # be storageSpec for persistent storage.
storageSpec:
# the name of volume for mount. if not will use emptyDir.
name: ""
diff --git a/helm-charts/charts/kube-starrocks/values.yaml b/helm-charts/charts/kube-starrocks/values.yaml
index eaf3985d..f7e05018 100644
--- a/helm-charts/charts/kube-starrocks/values.yaml
+++ b/helm-charts/charts/kube-starrocks/values.yaml
@@ -125,6 +125,7 @@ starrocks:
# annotations for starrocks cluster.
annotations: {}
# specify the cn deploy or not.
+ enabledBe: true
enabledCn: false
# spec to deploy fe.
@@ -536,7 +537,7 @@ starrocks:
limits:
cpu: 8
memory: 8Gi
- # specify storageclass name and request size.
+ # be storageSpec for persistent storage.
storageSpec:
# the name of volume for mount. if not will use emptyDir.
name: ""
diff --git a/helm-charts/charts/warehouse/.helmignore b/helm-charts/charts/warehouse/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/helm-charts/charts/warehouse/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/helm-charts/charts/warehouse/Chart.yaml b/helm-charts/charts/warehouse/Chart.yaml
new file mode 100644
index 00000000..70b9df38
--- /dev/null
+++ b/helm-charts/charts/warehouse/Chart.yaml
@@ -0,0 +1,46 @@
+apiVersion: v2
+name: warehouse
+description: A Helm chart for StarRocksWarehouse
+
+icon: https://avatars.githubusercontent.com/u/88238841
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+maintainers:
+ - name: Kevin Cai
+ email: caixiaohua@starrocks.com
+ - name: Dongxiao Yan
+ email: yandongxiao@starrocks.com
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 1.9.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: 3.1-latest
+
+kubeVersion: ">=1.18.3-0"
+
+keywords:
+ - operator
+ - starrocks
+ - database
+ - olap
+ - multi-warehouse
+
+sources:
+ - https://github.com/StarRocks/starrocks
+
+home: https://github.com/StarRocks/starrocks-kubernetes-operator
diff --git a/helm-charts/charts/warehouse/README.md b/helm-charts/charts/warehouse/README.md
new file mode 100644
index 00000000..63b3861f
--- /dev/null
+++ b/helm-charts/charts/warehouse/README.md
@@ -0,0 +1,43 @@
+# Deploy StarRocks Warehouse by warehouse Chart
+
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Release Charts](https://img.shields.io/badge/Release-helmcharts-green.svg)](https://github.com/StarRocks/starrocks-kubernetes-operator/releases)
+
+[Helm](https://helm.sh/) is a package manager for Kubernetes. A [Helm Chart](https://helm.sh/docs/topics/charts/) is a
+Helm package and contains all of the resource definitions necessary to run an application on a Kubernetes cluster. This
+topic describes how to use Helm to automatically deploy a StarRocks cluster on a Kubernetes cluster.
+
+## Before you begin
+
+- [Create a Kubernetes cluster](https://docs.starrocks.io/en-us/latest/deployment/sr_operator#create-kubernetes-cluster).
+- [Install Helm](https://helm.sh/docs/intro/quickstart/).
+- [Install StarRocks operator](../kube-starrocks/charts/operator/README.md).
+- [Install StarRocks cluster](../kube-starrocks/charts/starrocks/README.md).
+
+## Install warehouse Chart
+
+1. Add the StarRocks Helm repository.
+
+ ```bash
+ $ helm repo add starrocks-community https://starrocks.github.io/starrocks-kubernetes-operator
+ $ helm repo update
+ $ helm search repo starrocks-community
+ NAME CHART VERSION APP VERSION DESCRIPTION
+ starrocks-community/kube-starrocks 1.9.0 3.1-latest kube-starrocks includes two subcharts, starrock...
+ starrocks-community/operator 1.9.0 1.9.0 A Helm chart for StarRocks operator
+ starrocks-community/starrocks 1.9.0 3.1-latest A Helm chart for StarRocks cluster
+ starrocks-community/warehouse 1.9.0 3.1-latest A Helm chart for StarRocksWarehouse
+ ```
+
+2. Install the warehouse Chart.
+
+ ```bash
+ helm install warehouse starrocks-community/warehouse
+ ```
+
+ Please see [values.yaml](./values.yaml) for more details.
+
+## Uninstall warehouse Chart
+
+```bash
+helm uninstall warehouse
+```
diff --git a/helm-charts/charts/warehouse/templates/NOTES.txt b/helm-charts/charts/warehouse/templates/NOTES.txt
new file mode 100644
index 00000000..af840eba
--- /dev/null
+++ b/helm-charts/charts/warehouse/templates/NOTES.txt
@@ -0,0 +1 @@
+Thank you for installing {{ .Chart.Name }}-{{ .Chart.Version }} warehouse chart. Please wait for a few minutes for the warehouse to be ready.
diff --git a/helm-charts/charts/warehouse/templates/_helpers.tpl b/helm-charts/charts/warehouse/templates/_helpers.tpl
new file mode 100644
index 00000000..2851da3d
--- /dev/null
+++ b/helm-charts/charts/warehouse/templates/_helpers.tpl
@@ -0,0 +1,70 @@
+{{- define "starrockswarehouse.name" -}}
+{{- default .Chart.Name .Values.nameOverride -}}
+{{- end }}
+
+{{- define "starrockswarehouse.namespace" -}}
+{{ .Release.Namespace }}
+{{- end }}
+
+{{- define "starrockswarehouse.labels" -}}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{- define "starrockswarehouse.configmap.name" -}}
+{{- print (include "starrockswarehouse.name" .) "-cm" }}
+{{- end }}
+
+{{- define "starrockswarehouse.config" -}}
+cn.conf: |
+{{- if .Values.spec.config | indent 2 }}
+{{ .Values.spec.config | indent 2 }}
+{{- end }}
+{{- end }}
+
+{{/*
+starrockswarehouse.config.hash is used to calculate the hash value of the cn.conf, and due to the length limit, only
+the first 8 digits are taken, which will be used as the annotations for pods.
+*/}}
+{{- define "starrockswarehouse.config.hash" }}
+ {{- if .Values.spec.config }}
+ {{- $hash := toJson .Values.spec.config | sha256sum | trunc 8 }}
+ {{- printf "%s" $hash }}
+ {{- else }}
+ {{- printf "no-config" }}
+ {{- end }}
+{{- end }}
+
+{{- define "starrockswarehouse.webserver.port" -}}
+{{- include "starrockswarehouse.get.webserver.port" .Values.spec }}
+{{- end }}
+
+{{- define "starrockswarehouse.get.webserver.port" -}}
+{{- $config := index .config -}}
+{{- $configMap := dict -}}
+{{- range $line := splitList "\n" $config -}}
+{{- $pair := splitList "=" $line -}}
+{{- if eq (len $pair) 2 -}}
+{{- $_ := set $configMap (trim (index $pair 0)) (trim (index $pair 1)) -}}
+{{- end -}}
+{{- end -}}
+{{- if (index $configMap "webserver_port") -}}
+{{- print (index $configMap "webserver_port") }}
+{{- end }}
+{{- end }}
+
+{{- define "starrockscluster.cn.data.suffix" -}}
+{{- print "-data" }}
+{{- end }}
+
+{{- define "starrockscluster.cn.data.path" -}}
+{{- print "/opt/starrocks/cn/storage" }}
+{{- end }}
+
+{{- define "starrockscluster.cn.log.suffix" -}}
+{{- print "-log" }}
+{{- end }}
+
+{{- define "starrockscluster.cn.log.path" -}}
+{{- print "/opt/starrocks/cn/log" }}
+{{- end }}
diff --git a/helm-charts/charts/warehouse/templates/cnconfigmap.yaml b/helm-charts/charts/warehouse/templates/cnconfigmap.yaml
new file mode 100644
index 00000000..97eabf85
--- /dev/null
+++ b/helm-charts/charts/warehouse/templates/cnconfigmap.yaml
@@ -0,0 +1,13 @@
+{{- if and .Values.spec .Values.spec.config }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "starrockswarehouse.configmap.name" . }}
+ namespace: {{ template "starrockswarehouse.namespace" . }}
+ labels:
+ warehouse: {{ template "starrockswarehouse.name" . }}
+ app: "cn"
+data:
+{{ include "starrockswarehouse.config" . | indent 2 }}
+
+{{- end }}
diff --git a/helm-charts/charts/warehouse/templates/configmaps.yaml b/helm-charts/charts/warehouse/templates/configmaps.yaml
new file mode 100644
index 00000000..494dd9eb
--- /dev/null
+++ b/helm-charts/charts/warehouse/templates/configmaps.yaml
@@ -0,0 +1,14 @@
+{{- range .Values.configMaps }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .name }}
+data:
+ {{- range $key, $value := .data }}
+ {{- if $value }}
+ {{ $key }}: |
+ {{- $value | nindent 4 }}
+ {{- end }}
+ {{- end }}
+---
+{{- end }}
\ No newline at end of file
diff --git a/helm-charts/charts/warehouse/templates/secrets.yaml b/helm-charts/charts/warehouse/templates/secrets.yaml
new file mode 100644
index 00000000..4e7ba484
--- /dev/null
+++ b/helm-charts/charts/warehouse/templates/secrets.yaml
@@ -0,0 +1,14 @@
+{{- range .Values.secrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .name }}
+type: Opaque
+data:
+ {{- range $key, $value := .data }}
+ {{- if $value }}
+ {{ $key }}: {{ $value | b64enc }}
+ {{- end }}
+ {{- end }}
+---
+{{- end }}
\ No newline at end of file
diff --git a/helm-charts/charts/warehouse/templates/starrockswarehouse.yaml b/helm-charts/charts/warehouse/templates/starrockswarehouse.yaml
new file mode 100644
index 00000000..5f10f964
--- /dev/null
+++ b/helm-charts/charts/warehouse/templates/starrockswarehouse.yaml
@@ -0,0 +1,137 @@
+apiVersion: starrocks.com/v1
+kind: StarRocksWarehouse
+metadata:
+ name: {{ template "starrockswarehouse.name" . }}
+ namespace: {{ template "starrockswarehouse.namespace" . }}
+ labels:
+ warehouse: {{ template "starrockswarehouse.name" . }}
+ {{- include "starrockswarehouse.labels" . | nindent 4 }}
+spec:
+ starRocksCluster: {{ .Values.spec.starRocksClusterName }}
+ template:
+ image: "{{ .Values.spec.image.repository }}:{{ .Values.spec.image.tag }}"
+ {{- if .Values.spec.replicas }}
+ replicas: {{ .Values.spec.replicas }}
+ {{- end }}
+ {{- if .Values.spec.serviceAccount }}
+ serviceAccount: {{ .Values.spec.serviceAccount }}
+ {{- end }}
+ runAsNonRoot: {{ .Values.spec.runAsNonRoot }}
+ {{- if .Values.spec.podLabels }}
+ podLabels:
+ {{- toYaml .Values.spec.podLabels | nindent 6 }}
+ {{- end }}
+ {{- if .Values.spec.hostAliases }}
+ hostAliases:
+ {{- toYaml .Values.spec.hostAliases | nindent 6 }}
+ {{- end }}
+ {{- if .Values.spec.schedulerName }}
+ schedulerName: {{ .Values.spec.schedulerName }}
+ {{- end }}
+ {{- if .Values.spec.nodeSelector }}
+ nodeSelector:
+ {{- toYaml .Values.spec.nodeSelector | nindent 6 }}
+ {{- end }}
+ envVars:
+ - name: TZ
+ value: {{ .Values.timeZone }}
+ {{- if .Values.datadog.log.enabled }}
+ - name: LOG_CONSOLE
+ value: "1"
+ {{- end }}
+ {{- if .Values.spec.envVars }}
+ {{- toYaml .Values.spec.envVars | nindent 6 }}
+ {{- end }}
+ {{- if .Values.spec.affinity }}
+ affinity:
+ {{- toYaml .Values.spec.affinity | nindent 6 }}
+ {{- end }}
+ {{- if .Values.spec.tolerations }}
+ tolerations:
+ {{- toYaml .Values.spec.tolerations | nindent 6 }}
+ {{- end }}
+ {{- if .Values.spec.autoScalingPolicy }}
+ autoScalingPolicy:
+ {{- toYaml .Values.spec.autoScalingPolicy | nindent 6 }}
+ {{- end }}
+ {{- if .Values.spec.resources }}
+ {{- toYaml .Values.spec.resources | nindent 4 }}
+ {{- end }}
+ {{- if .Values.spec.service.type }}
+ service:
+ type: {{ .Values.spec.service.type }}
+ {{- if and (eq "LoadBalancer" .Values.spec.service.type) .Values.spec.service.loadbalancerIP }}
+ loadBalancerIP: {{ .Values.spec.service.loadbalancerIP }}
+ {{- end }}
+ {{- if .Values.spec.service.ports }}
+ ports:
+ {{- toYaml .Values.spec.service.ports | nindent 8 }}
+ {{- end }}
+ {{- if or .Values.spec.service.annotations .Values.datadog.metrics.enabled }}
+ annotations:
+ {{- if .Values.datadog.metrics.enabled }}
+ prometheus.io/path: "/metrics"
+ prometheus.io/port: "{{- default 8040 (include "starrockswarehouse.webserver.port" .) }}"
+ prometheus.io/scrape: "true"
+ {{- end }}
+ {{- if .Values.spec.service.annotations }}
+ {{- toYaml .Values.spec.service.annotations | nindent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ annotations:
+ app.starrocks.io/cn-config-hash: {{template "starrockswarehouse.config.hash" . }}
+ {{- if .Values.datadog.log.enabled }}
+ {{- if eq (trimAll " {}" .Values.datadog.log.logConfig) "" }}
+ ad.datadoghq.com/warehouse.logs: '[{"service":"warehouse"}]'
+ {{- else }}
+ ad.datadoghq.com/warehouse.logs: {{ printf "[%s]" (printf "{%s, \"service\": \"warehouse\"}" (trimAll " {}" .Values.datadog.log.logConfig) | fromJson | toJson) | squote }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.spec.annotations }}
+ {{ toYaml .Values.spec.annotations | indent 6 }}
+ {{- end }}
+ {{- if .Values.spec.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml .Values.spec.imagePullSecrets | nindent 6 }}
+ {{- end }}
+ {{- if .Values.spec.secrets }}
+ secrets:
+ {{- range .Values.spec.secrets }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ subPath: {{ .subPath }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.spec.configMaps }}
+ configMaps:
+ {{- range .Values.spec.configMaps }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ subPath: {{ .subPath }}
+ {{- end }}
+ {{- end }}
+ configMapInfo:
+ configMapName: {{template "starrockswarehouse.configmap.name" . }}
+ resolveKey: cn.conf
+ {{- if .Values.spec.storageSpec.name }}
+ storageVolumes:
+ - name: {{ .Values.spec.storageSpec.name }}{{template "starrockscluster.cn.data.suffix" . }}
+ {{- if .Values.spec.storageSpec.storageClassName }}
+ storageClassName: {{ .Values.spec.storageSpec.storageClassName }}
+ {{- end }}
+ {{- if .Values.spec.storageSpec.storageSize }}
+ storageSize: {{ .Values.spec.storageSpec.storageSize }}
+ {{- end }}
+ mountPath: {{template "starrockscluster.cn.data.path" . }}
+ {{- if .Values.spec.storageSpec.logStorageSize }}
+ - name: {{ .Values.spec.storageSpec.name }}{{template "starrockscluster.cn.log.suffix" . }}
+ {{- if .Values.spec.storageSpec.storageClassName }}
+ storageClassName: {{ .Values.spec.storageSpec.storageClassName }}
+ {{- end }}
+ {{- if .Values.spec.storageSpec.logStorageSize }}
+ storageSize: {{ .Values.spec.storageSpec.logStorageSize }}
+ {{- end }}
+ mountPath: {{template "starrockscluster.cn.log.path" . }}
+ {{- end }}
+ {{- end }}
diff --git a/helm-charts/charts/warehouse/values.yaml b/helm-charts/charts/warehouse/values.yaml
new file mode 100644
index 00000000..fede0189
--- /dev/null
+++ b/helm-charts/charts/warehouse/values.yaml
@@ -0,0 +1,198 @@
+# Ensure that the warehouse instance name differs from the starrocks instance name to avoid conflicts. For example, both
+# will generate configmaps using the 'nameOverride' as a prefix to the configmap name, followed by a specific suffix.
+nameOverride: "kube-starrocks"
+
+# TimeZone is used to set the environment variable TZ for pod, with Asia/Shanghai as the default.
+timeZone: Asia/Shanghai
+
+# This configuration is used to integrate with external system DataDog.
+# You can enable the integration by setting the enabled to true, e.g. datalog.log.enabled=true will enable datadog agent
+# to collect the log.
+datadog:
+ log:
+ enabled: false
+ # besides the attributes you added, chart will append "source" and "service" attributes to the log config.
+ # see https://docs.datadoghq.com/containers/kubernetes/log/?tab=operator for more details.
+ logConfig: '{}' # e.g. '{"app": "starrocks", "tags": ["aa", "bb"]}'
+ metrics:
+ enabled: false
+
+spec:
+ # The name of starrocks cluster, a must-have field.
+ # Note: the starrocks cluster must be created before the warehouse by operator.
+ starRocksClusterName:
+
+ # number of replicas to deploy.
+ # In the implementation of the operator: Even when both the replicas and autoScalingPolicy are set in the spec
+ # field, the replicas field of the statefulset created by the operator will be set to null. This ensures that the replicas
+ # field is controlled by HPA. Next, if a user's deployment, for example, does not involve changes to the statefulset,
+ # then the pods will not be recreated.
+ # Why should the replicas field in this values.yaml be set to null? This is the default replicas. When the user removes
+ # the autoScalingPolicy fields from the spec, the corresponding HPA object will be deleted. And when the replicas
+ # field is set, the number of pods will immediately revert to the replicas count, even though the user did not specify
+ # the replicas count in their own values.yaml.
+ # replicas: 1
+ image:
+ # image sliced by "repository:tag"
+ repository: starrocks/cn-ubuntu
+ tag: 3.1-latest
+ # serviceAccount for pod access cloud service.
+ serviceAccount: ""
+ # add annotations for pods. example, if you want to config monitor for datadog, you can config the annotations.
+ annotations: {}
+ # If runAsNonRoot is true, the container is run as non-root user.
+ # The userId will be set to 1000, and the groupID will be set to 1000.
+ runAsNonRoot: false
+ # specify the service name and port config and serviceType
+ # the service type refer https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ service:
+ # the service type, only supported ClusterIP, NodePort, LoadBalancer
+ type: "ClusterIP"
+ # the loadBalancerIP for static ip config when the type=LoadBalancer and loadBalancerIp is not empty.
+ loadbalancerIP: ""
+ # add annotations for service.
+ annotations: {}
+ # config the service port for service.
+ # if you want to use a dedicated port for service, you can config the port.
+ # see https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports for more details.
+ ports: []
+ # e.g. use a dedicated node port for service. The containerPort and port field can be commented out.
+ # - name: webserver
+ # nodePort: 30040 # The range of valid ports is 30000-32767
+ # containerPort: 8040 # The port on the container to expose
+ # port: 8040 # The port to expose on the service
+ # imagePullSecrets allows you to use secrets for pulling images for your pods.
+ imagePullSecrets: []
+ # - name: "image-pull-secret"
+ # If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes"
+ # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # kubernetes.io/arch: amd64
+ # kubernetes.io/os: linux
+ # the pod labels for user select or classify pods.
+ podLabels: {}
+ ## hostAliases allows adding entries to /etc/hosts inside the containers
+ hostAliases: []
+ # - ip: "127.0.0.1"
+ # hostnames:
+ # - "example.com"
+ # schedulerName allows you to specify which scheduler will be used for the pod
+ schedulerName: ""
+ # Additional container environment variables
+ # You specify this manually like you would a raw deployment manifest.
+ # This means you can bind in environment variables from secrets.
+ # Ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
+ envVars: []
+ # e.g. static environment variable:
+ # - name: DEMO_GREETING
+ # value: "Hello from the environment"
+ # e.g. secret environment variable:
+ # - name: USERNAME
+ # valueFrom:
+ # secretKeyRef:
+ # name: mysecret
+ # key: username
+ # affinity for pod scheduling.
+ affinity: {}
+ # nodeAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchFields:
+ # - key: metadata.name
+ # operator: In
+ # values:
+ # - target-host-name
+ # Node tolerations for pod scheduling to nodes with taints
+ # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+ autoScalingPolicy: {}
+ # you can select different versions of HPA (Horizontal Pod Autoscaler) based on the Kubernetes version you are
+ # using to ensure compatibility and adaptability. the default version is v2beta2.
+ # version: v2beta2
+ # maxReplicas: 10
+ # minReplicas: 1
+ # hpaPolicy:
+ # metrics:
+ # - type: Resource
+ # resource:
+ # name: memory
+ # target:
+ # averageUtilization: 30
+ # type: Utilization
+ # - type: Resource
+ # resource:
+ # name: cpu
+ # target:
+ # averageUtilization: 30
+ # type: Utilization
+ # behavior:
+ # scaleUp:
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 10
+ # scaleDown:
+ # selectPolicy: Disabled
+ # define resources requests and limits for pods.
+ resources:
+ limits:
+ cpu: 8
+ memory: 8Gi
+ requests:
+ cpu: 4
+ memory: 8Gi
+ # the config start for cn, the base information as follows.
+ config: |
+ sys_log_level = INFO
+ # ports for admin, web, heartbeat service
+ thrift_port = 9060
+ webserver_port = 8040
+ heartbeat_service_port = 9050
+ brpc_port = 8060
+ # mount secrets if necessary.
+ # see https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath for more details about subPath.
+ secrets: []
+ # e.g. mount my-secret to /etc/my-secret
+ # - name: my-secret
+ # mountPath: /etc/my-secret
+ # subPath: ""
+ # mount configmaps if necessary.
+ # see https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath for more details about subPath.
+ configMaps: []
+ # e.g. mount my-configmap to /etc/my-configmap
+ # - name: my-configmap
+ # mountPath: /etc/my-configmap
+ # subPath: ""
+ # cn storageSpec for persistent data.
+ storageSpec:
+ # the name of volume for mount. if not will use emptyDir.
+ name: ""
+ # the storageClassName represent the used storageclass name. If not set will use k8s cluster default storageclass.
+ # you must set name when you set storageClassName
+ storageClassName: ""
+ storageSize: 100Gi
+ # Setting this parameter can persist log storage
+ logStorageSize: 1Gi
+
+
+# create secrets if necessary.
+secrets: []
+ # e.g. create my-secret
+ # - name: my-secret
+ # data:
+ # key: |
+ # this is the content of the secret
+ # when mounted, key will be the name of the file
+
+# create configmaps if necessary.
+configMaps: []
+ # e.g. create my-configmap
+ # - name: my-configmap
+ # data:
+ # key: |
+ # this is the content of the configmap
+ # when mounted, key will be the name of the file
diff --git a/pkg/apis/starrocks/v1/component_type.go b/pkg/apis/starrocks/v1/component_type.go
index 1b275cfd..4ff034c0 100644
--- a/pkg/apis/starrocks/v1/component_type.go
+++ b/pkg/apis/starrocks/v1/component_type.go
@@ -70,7 +70,7 @@ type StarRocksComponentStatus struct {
// Phase the value from all pods of component status. If component have one failed pod phase=failed,
// also if fe have one creating pod phase=creating, also if component all running phase=running, others unknown.
- Phase MemberPhase `json:"phase"`
+ Phase ComponentPhase `json:"phase"`
// +optional
// Reason represents the reason of not running.
diff --git a/pkg/apis/starrocks/v1/load_type.go b/pkg/apis/starrocks/v1/load_type.go
index 11ef9098..b5b1d700 100644
--- a/pkg/apis/starrocks/v1/load_type.go
+++ b/pkg/apis/starrocks/v1/load_type.go
@@ -16,6 +16,7 @@ type loadInterface interface {
GetStartupProbeFailureSeconds() *int32
GetLivenessProbeFailureSeconds() *int32
GetReadinessProbeFailureSeconds() *int32
+ GetService() *StarRocksService
GetStorageVolumes() []StorageVolume
GetServiceAccount() string
@@ -200,6 +201,10 @@ func (spec *StarRocksLoadSpec) GetTolerations() []corev1.Toleration {
return spec.Tolerations
}
+func (spec *StarRocksLoadSpec) GetService() *StarRocksService {
+ return spec.Service
+}
+
func (spec *StarRocksLoadSpec) GetNodeSelector() map[string]string {
return spec.NodeSelector
}
diff --git a/pkg/apis/starrocks/v1/starrockscluster_types.go b/pkg/apis/starrocks/v1/starrockscluster_types.go
index 68124c46..fa88a3bd 100644
--- a/pkg/apis/starrocks/v1/starrockscluster_types.go
+++ b/pkg/apis/starrocks/v1/starrockscluster_types.go
@@ -47,7 +47,7 @@ type StarRocksClusterSpec struct {
// StarRocksClusterStatus defines the observed state of StarRocksCluster.
type StarRocksClusterStatus struct {
// Represents the state of cluster. the possible value are: running, failed, pending
- Phase ClusterPhase `json:"phase"`
+ Phase Phase `json:"phase"`
// Represents the status of fe. the status have running, failed and creating pods.
StarRocksFeStatus *StarRocksFeStatus `json:"starRocksFeStatus,omitempty"`
@@ -189,34 +189,41 @@ func (spec *StarRocksFeProxySpec) GetTerminationGracePeriodSeconds() *int64 {
return nil
}
-// ClusterPhase represent the cluster phase. the possible value for cluster phase are: running, failed, pending.
-type ClusterPhase string
+// Phase is defined under status, e.g.
+// 1. StarRocksClusterStatus.Phase represents the phase of starrocks cluster.
+// 2. StarRocksWarehouseStatus.Phase represents the phase of starrocks warehouse.
+// The possible values for a cluster phase are: running, failed, pending, deleting.
+type Phase string
-// MemberPhase represent the component phase about be, cn, be. the possible value for component phase are:
-// reconciling, failed, running.
-type MemberPhase string
+// ComponentPhase represents the component phase. e.g.
+// 1. StarRocksCluster contains three components: FE, CN, BE.
+// 2. StarRocksWarehouse reuses the CN component.
+// The possible values for a component phase are: reconciling, failed, running.
+type ComponentPhase string
const (
// ClusterRunning represents starrocks cluster is running.
- ClusterRunning ClusterPhase = "running"
+ ClusterRunning Phase = "running"
// ClusterFailed represents starrocks cluster failed.
- ClusterFailed ClusterPhase = "failed"
+ ClusterFailed Phase = "failed"
// ClusterPending represents the starrocks cluster is creating
- ClusterPending ClusterPhase = "pending"
+ ClusterPending Phase = "pending"
// ClusterDeleting waiting all resource deleted
- ClusterDeleting ClusterPhase = "deleting"
+ ClusterDeleting Phase = "deleting"
)
const (
// ComponentReconciling the starrocks have component in starting.
- ComponentReconciling MemberPhase = "reconciling"
+ ComponentReconciling ComponentPhase = "reconciling"
+
// ComponentFailed have at least one service failed.
- ComponentFailed MemberPhase = "failed"
+ ComponentFailed ComponentPhase = "failed"
+
// ComponentRunning all components runs available.
- ComponentRunning MemberPhase = "running"
+ ComponentRunning ComponentPhase = "running"
)
// AnnotationOperationValue present the operation for fe, cn, be.
diff --git a/pkg/apis/starrocks/v1/starrockswarehouse_types.go b/pkg/apis/starrocks/v1/starrockswarehouse_types.go
new file mode 100644
index 00000000..ad9f0286
--- /dev/null
+++ b/pkg/apis/starrocks/v1/starrockswarehouse_types.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021-present, StarRocks Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// StarRocksWarehouseSpec defines the desired state of StarRocksWarehouse
+type StarRocksWarehouseSpec struct {
+ // StarRocksCluster is the name of a StarRocksCluster which the warehouse belongs to.
+ StarRocksCluster string `json:"starRocksCluster"`
+
+	// Template defines the component configuration.
+ Template *WarehouseComponentSpec `json:"template"`
+}
+
+// WarehouseComponentSpec defines the desired state of component.
+type WarehouseComponentSpec struct {
+ StarRocksComponentSpec `json:",inline"`
+
+ // +optional
+	// EnvVars is a slice of environment variables that are added to the pods; the default is empty.
+ EnvVars []corev1.EnvVar `json:"envVars,omitempty"`
+
+ // AutoScalingPolicy defines auto scaling policy
+ AutoScalingPolicy *AutoScalingPolicy `json:"autoScalingPolicy,omitempty"`
+}
+
+func (componentSpec *WarehouseComponentSpec) ToCnSpec() *StarRocksCnSpec {
+ return &StarRocksCnSpec{
+ StarRocksComponentSpec: componentSpec.StarRocksComponentSpec,
+ CnEnvVars: componentSpec.EnvVars,
+ AutoScalingPolicy: componentSpec.AutoScalingPolicy,
+ }
+}
+
+// WarehouseComponentStatus represents the status of component.
+// +kubebuilder:object:generate=false
+type WarehouseComponentStatus = StarRocksCnStatus
+
+// StarRocksWarehouseStatus defines the observed state of StarRocksWarehouse.
+type StarRocksWarehouseStatus struct {
+ *WarehouseComponentStatus `json:",inline"`
+}
+
+// StarRocksWarehouse defines a starrocks warehouse.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:shortName=warehouse
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="status",type=string,JSONPath=`.status.phase`
+// +kubebuilder:printcolumn:name="reason",type=string,JSONPath=`.status.reason`
+// +kubebuilder:storageversion
+// +k8s:openapi-gen=true
+// +genclient
+type StarRocksWarehouse struct {
+ metav1.TypeMeta `json:",inline"`
+ // +k8s:openapi-gen=false
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec represents the specification of desired state of a starrocks warehouse.
+ Spec StarRocksWarehouseSpec `json:"spec,omitempty"`
+
+ // Status represents the recent observed status of the starrocks warehouse.
+ Status StarRocksWarehouseStatus `json:"status,omitempty"`
+}
+
+// StarRocksWarehouseList contains a list of StarRocksWarehouse
+// +kubebuilder:object:root=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+type StarRocksWarehouseList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []StarRocksWarehouse `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&StarRocksWarehouse{}, &StarRocksWarehouseList{})
+}
diff --git a/pkg/apis/starrocks/v1/zz_generated.deepcopy.go b/pkg/apis/starrocks/v1/zz_generated.deepcopy.go
index adcef60e..fdbf2d27 100644
--- a/pkg/apis/starrocks/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/starrocks/v1/zz_generated.deepcopy.go
@@ -929,6 +929,105 @@ func (in *StarRocksServicePort) DeepCopy() *StarRocksServicePort {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StarRocksWarehouse) DeepCopyInto(out *StarRocksWarehouse) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StarRocksWarehouse.
+func (in *StarRocksWarehouse) DeepCopy() *StarRocksWarehouse {
+ if in == nil {
+ return nil
+ }
+ out := new(StarRocksWarehouse)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StarRocksWarehouse) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StarRocksWarehouseList) DeepCopyInto(out *StarRocksWarehouseList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]StarRocksWarehouse, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StarRocksWarehouseList.
+func (in *StarRocksWarehouseList) DeepCopy() *StarRocksWarehouseList {
+ if in == nil {
+ return nil
+ }
+ out := new(StarRocksWarehouseList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StarRocksWarehouseList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StarRocksWarehouseSpec) DeepCopyInto(out *StarRocksWarehouseSpec) {
+ *out = *in
+ if in.Template != nil {
+ in, out := &in.Template, &out.Template
+ *out = new(WarehouseComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StarRocksWarehouseSpec.
+func (in *StarRocksWarehouseSpec) DeepCopy() *StarRocksWarehouseSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StarRocksWarehouseSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StarRocksWarehouseStatus) DeepCopyInto(out *StarRocksWarehouseStatus) {
+ *out = *in
+ if in.WarehouseComponentStatus != nil {
+ in, out := &in.WarehouseComponentStatus, &out.WarehouseComponentStatus
+ *out = new(StarRocksCnStatus)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StarRocksWarehouseStatus.
+func (in *StarRocksWarehouseStatus) DeepCopy() *StarRocksWarehouseStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StarRocksWarehouseStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageVolume) DeepCopyInto(out *StorageVolume) {
*out = *in
@@ -948,3 +1047,31 @@ func (in *StorageVolume) DeepCopy() *StorageVolume {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WarehouseComponentSpec) DeepCopyInto(out *WarehouseComponentSpec) {
+ *out = *in
+ in.StarRocksComponentSpec.DeepCopyInto(&out.StarRocksComponentSpec)
+ if in.EnvVars != nil {
+ in, out := &in.EnvVars, &out.EnvVars
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AutoScalingPolicy != nil {
+ in, out := &in.AutoScalingPolicy, &out.AutoScalingPolicy
+ *out = new(AutoScalingPolicy)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WarehouseComponentSpec.
+func (in *WarehouseComponentSpec) DeepCopy() *WarehouseComponentSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(WarehouseComponentSpec)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/common/resource_utils/configmap.go b/pkg/common/resource_utils/configmap.go
index fed94aa9..21110463 100644
--- a/pkg/common/resource_utils/configmap.go
+++ b/pkg/common/resource_utils/configmap.go
@@ -66,19 +66,19 @@ func ResolveConfigMap(configMap *corev1.ConfigMap, key string) (map[string]inter
if _, ok := data[key]; !ok {
return res, nil
}
-
value := data[key]
- viper.SetConfigType("properties")
- err := viper.ReadConfig(bytes.NewBuffer([]byte(value)))
- if err != nil {
+ // We use a new viper instance, not the global one, in order to avoid concurrency problems: concurrent map iteration
+	// and map write.
+ v := viper.New()
+ v.SetConfigType("properties")
+ if err := v.ReadConfig(bytes.NewBuffer([]byte(value))); err != nil {
return nil, err
}
-
- return viper.AllSettings(), nil
+ return v.AllSettings(), nil
}
-// getPort get ports from config file.
+// GetPort gets ports from config file.
func GetPort(config map[string]interface{}, key string) int32 {
if v, ok := config[key]; ok {
if port, err := strconv.ParseInt(v.(string), 10, 32); err == nil {
diff --git a/pkg/common/resource_utils/service.go b/pkg/common/resource_utils/service.go
index a8779c82..d4d44152 100644
--- a/pkg/common/resource_utils/service.go
+++ b/pkg/common/resource_utils/service.go
@@ -18,21 +18,14 @@ import (
srapi "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/constant"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/hash"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/service"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
)
-type StarRocksServiceType string
-
-const (
- FeService StarRocksServiceType = "fe"
- BeService StarRocksServiceType = "be"
- CnService StarRocksServiceType = "cn"
- FeProxyService StarRocksServiceType = "fe-proxy"
-)
-
const (
FeHTTPPortName = "http"
FeRPCPortName = "rpc"
@@ -64,14 +57,14 @@ type hashService struct {
}
// BuildExternalService build the external service. not have selector
-func BuildExternalService(src *srapi.StarRocksCluster, name string, serviceType StarRocksServiceType,
+func BuildExternalService(object object.StarRocksObject, spec srapi.SpecInterface,
config map[string]interface{}, selector map[string]string, labels map[string]string) corev1.Service {
// the k8s service type.
var srPorts []srapi.StarRocksServicePort
svc := corev1.Service{
ObjectMeta: metav1.ObjectMeta{
- Name: name,
- Namespace: src.Namespace,
+ Name: service.ExternalServiceName(object.AliasName, spec),
+ Namespace: object.GetNamespace(),
Labels: labels,
},
Spec: corev1.ServiceSpec{
@@ -79,40 +72,18 @@ func BuildExternalService(src *srapi.StarRocksCluster, name string, serviceType
},
}
- anno := map[string]string{}
- if serviceType == FeService {
- spec := src.Spec.StarRocksFeSpec
- if svc.Name == "" {
- svc.Name = src.Name + "-" + srapi.DEFAULT_FE
- }
- setServiceType(spec.Service, &svc)
- anno = getServiceAnnotations(spec.Service)
- srPorts = getFeServicePorts(config, spec.Service)
- } else if serviceType == BeService {
- spec := src.Spec.StarRocksBeSpec
- if svc.Name == "" {
- svc.Name = src.Name + "-" + srapi.DEFAULT_BE
- }
- setServiceType(spec.Service, &svc)
- anno = getServiceAnnotations(spec.Service)
- srPorts = getBeServicePorts(config, spec.Service)
- } else if serviceType == CnService {
- spec := src.Spec.StarRocksCnSpec
- if svc.Name == "" {
- svc.Name = src.Name + "-" + srapi.DEFAULT_CN
- }
- setServiceType(spec.Service, &svc)
- anno = getServiceAnnotations(spec.Service)
- srPorts = getCnServicePorts(config, spec.Service)
- } else if serviceType == FeProxyService {
- if svc.Name == "" {
- svc.Name = src.Name + "-" + srapi.DEFAULT_FE_PROXY
- }
- feproxySpec := src.Spec.StarRocksFeProxySpec
- setServiceType(feproxySpec.Service, &svc)
- anno = getServiceAnnotations(feproxySpec.Service)
+ setServiceType(spec.GetService(), &svc)
+ anno := getServiceAnnotations(spec.GetService())
+ switch spec.(type) {
+ case *srapi.StarRocksFeSpec:
+ srPorts = getFeServicePorts(config, spec.GetService())
+ case *srapi.StarRocksBeSpec:
+ srPorts = getBeServicePorts(config, spec.GetService())
+ case *srapi.StarRocksCnSpec:
+ srPorts = getCnServicePorts(config, spec.GetService())
+ case *srapi.StarRocksFeProxySpec:
srPorts = []srapi.StarRocksServicePort{
- mergePort(feproxySpec.Service, srapi.StarRocksServicePort{
+ mergePort(spec.GetService(), srapi.StarRocksServicePort{
Name: FE_PORXY_HTTP_PORT_NAME,
Port: FE_PROXY_HTTP_PORT,
ContainerPort: FE_PROXY_HTTP_PORT,
@@ -120,7 +91,7 @@ func BuildExternalService(src *srapi.StarRocksCluster, name string, serviceType
}
}
- ref := metav1.NewControllerRef(src, src.GroupVersionKind())
+ ref := metav1.NewControllerRef(object, object.GroupVersionKind())
svc.OwnerReferences = []metav1.OwnerReference{*ref}
var ports []corev1.ServicePort
diff --git a/pkg/common/resource_utils/service_test.go b/pkg/common/resource_utils/service_test.go
index 5387fa2f..6f8a73b8 100644
--- a/pkg/common/resource_utils/service_test.go
+++ b/pkg/common/resource_utils/service_test.go
@@ -20,6 +20,7 @@ import (
"testing"
srapi "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -62,7 +63,101 @@ func Test_getServiceAnnotations(t *testing.T) {
}
}
-func TestBuildExternalService(t *testing.T) {
+func TestBuildExternalService_ForStarRocksWarehouse(t *testing.T) {
+ warehouse := &srapi.StarRocksWarehouse{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ Spec: srapi.StarRocksWarehouseSpec{
+ StarRocksCluster: "test",
+ Template: &srapi.WarehouseComponentSpec{
+ StarRocksComponentSpec: srapi.StarRocksComponentSpec{
+ StarRocksLoadSpec: srapi.StarRocksLoadSpec{
+ Service: &srapi.StarRocksService{
+ Type: corev1.ServiceTypeLoadBalancer,
+ LoadBalancerIP: "127.0.0.1",
+ },
+ },
+ },
+ },
+ },
+ }
+
+ type args struct {
+ src *srapi.StarRocksWarehouse
+ }
+ tests := []struct {
+ name string
+ args args
+ wantCnService corev1.Service
+ }{
+ {
+ name: "build external service",
+ args: args{
+ src: warehouse,
+ },
+ wantCnService: corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-warehouse-cn-service",
+ Namespace: "default",
+ Annotations: map[string]string{
+ srapi.ComponentResourceHash: "3725082955",
+ },
+ OwnerReferences: func() []metav1.OwnerReference {
+ ref := metav1.NewControllerRef(warehouse, warehouse.GroupVersionKind())
+ return []metav1.OwnerReference{*ref}
+ }(),
+ },
+ Spec: corev1.ServiceSpec{
+ Type: corev1.ServiceTypeLoadBalancer,
+ PublishNotReadyAddresses: false,
+ LoadBalancerIP: "127.0.0.1",
+ Ports: func() []corev1.ServicePort {
+ srPorts := getCnServicePorts(map[string]interface{}{}, nil)
+ var ports []corev1.ServicePort
+ for _, sp := range srPorts {
+ servicePort := corev1.ServicePort{
+ Name: sp.Name,
+ Port: sp.Port,
+ NodePort: sp.NodePort,
+ Protocol: corev1.ProtocolTCP,
+ TargetPort: intstr.FromInt(int(sp.ContainerPort)),
+ }
+ ports = append(ports, servicePort)
+ }
+ return ports
+ }(),
+ },
+ },
+ },
+ }
+
+ equal := func(got, want corev1.Service) {
+ gotData, _ := json.Marshal(got)
+ wantData, _ := json.Marshal(want)
+ if len(gotData) != len(wantData) {
+ t.Errorf("BuildExternalService() = %v, want %v", got, want)
+ return
+ }
+ for i := range gotData {
+ if gotData[i] != wantData[i] {
+ t.Errorf("BuildExternalService() = %v, want %v", got, want)
+ return
+ }
+ }
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotCnService := BuildExternalService(object.NewFromWarehouse(warehouse),
+ warehouse.Spec.Template.ToCnSpec(), map[string]interface{}{}, map[string]string{}, map[string]string{})
+ equal(gotCnService, tt.wantCnService)
+ })
+ }
+}
+
+func TestBuildExternalService_ForStarRocksCluster(t *testing.T) {
src := &srapi.StarRocksCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
@@ -79,37 +174,50 @@ func TestBuildExternalService(t *testing.T) {
},
},
},
+ StarRocksBeSpec: &srapi.StarRocksBeSpec{
+ StarRocksComponentSpec: srapi.StarRocksComponentSpec{
+ StarRocksLoadSpec: srapi.StarRocksLoadSpec{
+ Service: &srapi.StarRocksService{
+ Type: corev1.ServiceTypeLoadBalancer,
+ LoadBalancerIP: "127.0.0.1",
+ },
+ },
+ },
+ },
+ StarRocksCnSpec: &srapi.StarRocksCnSpec{
+ StarRocksComponentSpec: srapi.StarRocksComponentSpec{
+ StarRocksLoadSpec: srapi.StarRocksLoadSpec{
+ Service: &srapi.StarRocksService{
+ Type: corev1.ServiceTypeLoadBalancer,
+ LoadBalancerIP: "127.0.0.1",
+ },
+ },
+ },
+ },
},
}
+
type args struct {
- src *srapi.StarRocksCluster
- name string
- serviceType StarRocksServiceType
- config map[string]interface{}
- selector map[string]string
- labels map[string]string
+ src *srapi.StarRocksCluster
}
tests := []struct {
- name string
- args args
- want corev1.Service
+ name string
+ args args
+ wantFeService corev1.Service
+ wantBeService corev1.Service
+ wantCnService corev1.Service
}{
{
- name: "test build external service",
+ name: "build external service",
args: args{
- src: src,
- name: "service-name",
- serviceType: FeService,
- config: map[string]interface{}{},
- selector: map[string]string{},
- labels: map[string]string{},
+ src: src,
},
- want: corev1.Service{
+ wantFeService: corev1.Service{
ObjectMeta: metav1.ObjectMeta{
- Name: "service-name",
+ Name: "test-fe-service",
Namespace: "default",
Annotations: map[string]string{
- srapi.ComponentResourceHash: "1503664666",
+ srapi.ComponentResourceHash: "2802874283",
},
OwnerReferences: func() []metav1.OwnerReference {
ref := metav1.NewControllerRef(src, src.GroupVersionKind())
@@ -140,23 +248,102 @@ func TestBuildExternalService(t *testing.T) {
}(),
},
},
+ wantBeService: corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-be-service",
+ Namespace: "default",
+ Annotations: map[string]string{
+ srapi.ComponentResourceHash: "820013195",
+ },
+ OwnerReferences: func() []metav1.OwnerReference {
+ ref := metav1.NewControllerRef(src, src.GroupVersionKind())
+ return []metav1.OwnerReference{*ref}
+ }(),
+ },
+ Spec: corev1.ServiceSpec{
+ Type: corev1.ServiceTypeLoadBalancer,
+ PublishNotReadyAddresses: false,
+ LoadBalancerIP: "127.0.0.1",
+ Ports: func() []corev1.ServicePort {
+ srPorts := getBeServicePorts(map[string]interface{}{}, nil)
+ var ports []corev1.ServicePort
+ for _, sp := range srPorts {
+ servicePort := corev1.ServicePort{
+ Name: sp.Name,
+ Port: sp.Port,
+ NodePort: sp.NodePort,
+ Protocol: corev1.ProtocolTCP,
+ TargetPort: intstr.FromInt(int(sp.ContainerPort)),
+ }
+ ports = append(ports, servicePort)
+ }
+ return ports
+ }(),
+ },
+ },
+ wantCnService: corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-cn-service",
+ Namespace: "default",
+ Annotations: map[string]string{
+ srapi.ComponentResourceHash: "2894907321",
+ },
+ OwnerReferences: func() []metav1.OwnerReference {
+ ref := metav1.NewControllerRef(src, src.GroupVersionKind())
+ return []metav1.OwnerReference{*ref}
+ }(),
+ },
+ Spec: corev1.ServiceSpec{
+ Type: corev1.ServiceTypeLoadBalancer,
+ PublishNotReadyAddresses: false,
+ LoadBalancerIP: "127.0.0.1",
+ Ports: func() []corev1.ServicePort {
+ srPorts := getCnServicePorts(map[string]interface{}{}, nil)
+ var ports []corev1.ServicePort
+ for _, sp := range srPorts {
+ servicePort := corev1.ServicePort{
+ Name: sp.Name,
+ Port: sp.Port,
+ NodePort: sp.NodePort,
+ Protocol: corev1.ProtocolTCP,
+ TargetPort: intstr.FromInt(int(sp.ContainerPort)),
+ }
+ ports = append(ports, servicePort)
+ }
+ return ports
+ }(),
+ },
+ },
},
}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got := BuildExternalService(tt.args.src, tt.args.name, tt.args.serviceType, tt.args.config, tt.args.selector, tt.args.labels)
- gotData, _ := json.Marshal(got)
- wantData, _ := json.Marshal(tt.want)
- if len(gotData) != len(wantData) {
- t.Errorf("BuildExternalService() = %v, want %v", got, tt.want)
+
+ equal := func(got, want corev1.Service) {
+ gotData, _ := json.Marshal(got)
+ wantData, _ := json.Marshal(want)
+ if len(gotData) != len(wantData) {
+ t.Errorf("BuildExternalService() = %v, want %v", got, want)
+ return
+ }
+ for i := range gotData {
+ if gotData[i] != wantData[i] {
+ t.Errorf("BuildExternalService() = %v, want %v", got, want)
return
}
- for i := range gotData {
- if gotData[i] != wantData[i] {
- t.Errorf("BuildExternalService() = %v, want %v", got, tt.want)
- return
- }
- }
+ }
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ object := object.NewFromCluster(src)
+ gotFeService := BuildExternalService(object, src.Spec.StarRocksFeSpec,
+ map[string]interface{}{}, map[string]string{}, map[string]string{})
+ equal(gotFeService, tt.wantFeService)
+ gotBeService := BuildExternalService(object, src.Spec.StarRocksBeSpec,
+ map[string]interface{}{}, map[string]string{}, map[string]string{})
+ equal(gotBeService, tt.wantBeService)
+ gotCnService := BuildExternalService(object, src.Spec.StarRocksCnSpec,
+ map[string]interface{}{}, map[string]string{}, map[string]string{})
+ equal(gotCnService, tt.wantCnService)
})
}
}
diff --git a/pkg/controllers.go b/pkg/controllers.go
index 24f43596..fc4a927f 100644
--- a/pkg/controllers.go
+++ b/pkg/controllers.go
@@ -21,13 +21,10 @@ import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
- ctrl "sigs.k8s.io/controller-runtime"
)
var (
- // Controllers through the init for add Controller.
- Controllers []Controller
- Scheme = runtime.NewScheme()
+ Scheme = runtime.NewScheme()
)
func init() {
@@ -36,6 +33,17 @@ func init() {
// +kubebuilder:scaffold:scheme
}
-type Controller interface {
- Init(mgr ctrl.Manager)
+// GetPhaseFromComponent returns the Phase of Cluster or Warehouse based on the component status.
+// It returns an empty string if the phase cannot be determined.
+func GetPhaseFromComponent(componentStatus *v1.StarRocksComponentStatus) v1.Phase {
+ if componentStatus == nil {
+ return ""
+ }
+ if componentStatus.Phase == v1.ComponentReconciling {
+ return v1.ClusterPending
+ }
+ if componentStatus.Phase == v1.ComponentFailed {
+ return v1.ClusterFailed
+ }
+ return ""
}
diff --git a/pkg/k8sutils/k8sutils.go b/pkg/k8sutils/k8sutils.go
index c6e23e3d..ec455aeb 100644
--- a/pkg/k8sutils/k8sutils.go
+++ b/pkg/k8sutils/k8sutils.go
@@ -40,10 +40,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-// judge two services equal or not in some fields. develoer can custom the function.
+// ServiceEqual judges two services equal or not in some fields. developer can custom the function.
type ServiceEqual func(svc1 *corev1.Service, svc2 *corev1.Service) bool
-// judge two statefulset equal or not in some fields. develoer can custom the function.
+// StatefulSetEqual judges two statefulset equal or not in some fields. developer can custom the function.
type StatefulSetEqual func(st1 *appv1.StatefulSet, st2 *appv1.StatefulSet) bool
func ApplyService(ctx context.Context, k8sclient client.Client, svc *corev1.Service, equal ServiceEqual) error {
@@ -57,8 +57,8 @@ func ApplyService(ctx context.Context, k8sclient client.Client, svc *corev1.Serv
}
if equal(svc, &esvc) {
- klog.Info("CreateOrUpdateService service Name, Ports, Selector, ServiceType, Labels have not change ",
- "namespace ", svc.Namespace, " name ", svc.Name)
+ klog.Info("Apply service Name, Ports, Selector, ServiceType, Labels have not change ", "namespace ",
+ svc.Namespace, " name ", svc.Name)
return nil
}
@@ -130,11 +130,11 @@ func ApplyConfigMap(ctx context.Context, k8sClient client.Client, configmap *cor
}
// ApplyStatefulSet when the object is not exist, create object. if exist and statefulset have been updated, patch the statefulset.
-func ApplyStatefulSet(ctx context.Context, k8sclient client.Client, st *appv1.StatefulSet, equal StatefulSetEqual) error {
+func ApplyStatefulSet(ctx context.Context, k8sClient client.Client, st *appv1.StatefulSet, equal StatefulSetEqual) error {
var est appv1.StatefulSet
- err := k8sclient.Get(ctx, types.NamespacedName{Namespace: st.Namespace, Name: st.Name}, &est)
+ err := k8sClient.Get(ctx, types.NamespacedName{Namespace: st.Namespace, Name: st.Name}, &est)
if err != nil && apierrors.IsNotFound(err) {
- return CreateClientObject(ctx, k8sclient, st)
+ return CreateClientObject(ctx, k8sClient, st)
} else if err != nil {
return err
}
@@ -152,68 +152,48 @@ func ApplyStatefulSet(ctx context.Context, k8sclient client.Client, st *appv1.St
}
st.ResourceVersion = est.ResourceVersion
- return UpdateClientObject(ctx, k8sclient, st)
+ return UpdateClientObject(ctx, k8sClient, st)
}
-func CreateClientObject(ctx context.Context, k8sclient client.Client, object client.Object) error {
- klog.Info("Creating resource service ", "namespace ", object.GetNamespace(), " name ", object.GetName(),
- " kind ", object.GetObjectKind().GroupVersionKind().Kind)
- if err := k8sclient.Create(ctx, object); err != nil {
+func CreateClientObject(ctx context.Context, k8sClient client.Client, object client.Object) error {
+ klog.Infof("Creating k8s resource namespace=%s, name=%s, kind=%s", object.GetNamespace(), object.GetName(),
+ object.GetObjectKind().GroupVersionKind().Kind)
+ if err := k8sClient.Create(ctx, object); err != nil {
return err
}
return nil
}
-func UpdateClientObject(ctx context.Context, k8sclient client.Client, object client.Object) error {
+func UpdateClientObject(ctx context.Context, k8sClient client.Client, object client.Object) error {
klog.Info("Updating resource service ", "namespace ", object.GetNamespace(), " name ", object.GetName(),
" kind ", object.GetObjectKind())
- if err := k8sclient.Update(ctx, object); err != nil {
+ if err := k8sClient.Update(ctx, object); err != nil {
return err
}
return nil
}
// PatchClientObject patch object when the object exist. if not return error.
-func PatchClientObject(ctx context.Context, k8sclient client.Client, object client.Object) error {
+func PatchClientObject(ctx context.Context, k8sClient client.Client, object client.Object) error {
klog.V(constant.LOG_LEVEL).Infof("patch resource namespace=%s,name=%s,kind=%s.",
object.GetNamespace(), object.GetName(), object.GetObjectKind())
- if err := k8sclient.Patch(ctx, object, client.Merge); err != nil {
+ if err := k8sClient.Patch(ctx, object, client.Merge); err != nil {
return err
}
return nil
}
-// CreateOrUpdate patch object if not exist create object.
-func CreateOrUpdate(ctx context.Context, k8sclient client.Client, object client.Object) error {
- klog.V(constant.LOG_LEVEL).Infof("patch or create resource namespace=%s,name=%s,kind=%s.",
- object.GetNamespace(), object.GetName(), object.GetObjectKind())
- if err := k8sclient.Update(ctx, object); apierrors.IsNotFound(err) {
- return k8sclient.Create(ctx, object)
- } else if err != nil {
- return err
- }
-
- return nil
-}
-
-func DeleteClientObject(ctx context.Context, k8sclient client.Client, object client.Object) error {
- if err := k8sclient.Delete(ctx, object); err != nil {
- return err
- }
- return nil
-}
-
// DeleteStatefulset delete statefulset.
-func DeleteStatefulset(ctx context.Context, k8sclient client.Client, namespace, name string) error {
+func DeleteStatefulset(ctx context.Context, k8sClient client.Client, namespace, name string) error {
var st appv1.StatefulSet
- if err := k8sclient.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, &st); apierrors.IsNotFound(err) {
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, &st); apierrors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
- return k8sclient.Delete(ctx, &st)
+ return k8sClient.Delete(ctx, &st)
}
// DeleteService delete service.
@@ -337,3 +317,65 @@ func GetKubernetesVersion() error {
KUBE_MINOR_VERSION = version.Minor
return nil
}
+
+// GetEnvVarValue returns the value of an environment variable. It handles both Value and ValueFrom cases.
+// It assumes that the environment variable exists and is valid.
+func GetEnvVarValue(k8sClient client.Client, namespace string, envVar corev1.EnvVar) (string, error) {
+ if envVar.Value != "" {
+ // If Value is not empty, return it directly
+ return envVar.Value, nil
+ } else if envVar.ValueFrom != nil {
+ // If ValueFrom is not nil, handle different sources
+ valueFrom := envVar.ValueFrom
+ if valueFrom.ConfigMapKeyRef != nil {
+ // If ConfigMapKeyRef is not nil, get the value from the configmap's key
+ name := valueFrom.ConfigMapKeyRef.Name
+ key := valueFrom.ConfigMapKeyRef.Key
+ return getValueFromConfigmap(k8sClient, namespace, name, key)
+ } else if valueFrom.SecretKeyRef != nil {
+ // If SecretKeyRef is not nil, get the value from the secret's key
+ name := valueFrom.SecretKeyRef.Name
+ key := valueFrom.SecretKeyRef.Key
+ return getValueFromSecret(k8sClient, namespace, name, key)
+ }
+ }
+ return "", fmt.Errorf("invalid environment variable: %v", envVar)
+}
+
+// getValueFromConfigmap returns the runtime value of a key in a configmap.
+// It assumes that the configmap and the key exist and are valid.
+func getValueFromConfigmap(k8sClient client.Client, namespace string, name string, key string) (string, error) {
+ var configMap corev1.ConfigMap
+ err := k8sClient.Get(context.Background(),
+ types.NamespacedName{
+ Name: name,
+ Namespace: namespace,
+ }, &configMap)
+ if err != nil {
+ return "", err
+ }
+ value, ok := configMap.Data[key]
+ if !ok {
+ return "", fmt.Errorf("key %s not found in configmap %s", key, name)
+ }
+ return value, nil
+}
+
+// getValueFromSecret returns the value of a key in a secret.
+// It assumes that the secret and the key exist and are valid.
+func getValueFromSecret(k8sClient client.Client, namespace string, name string, key string) (string, error) {
+ var secret corev1.Secret
+ err := k8sClient.Get(context.Background(),
+ types.NamespacedName{
+ Name: name,
+ Namespace: namespace,
+ }, &secret)
+ if err != nil {
+ return "", err
+ }
+ value, ok := secret.Data[key]
+ if !ok {
+ return "", fmt.Errorf("key %s not found in secret %s", key, name)
+ }
+ return string(value), nil
+}
diff --git a/pkg/k8sutils/k8sutils_test.go b/pkg/k8sutils/k8sutils_test.go
index 8d577d2b..8859f6ac 100644
--- a/pkg/k8sutils/k8sutils_test.go
+++ b/pkg/k8sutils/k8sutils_test.go
@@ -23,12 +23,14 @@ import (
v1 "k8s.io/api/autoscaling/v1"
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/api/autoscaling/v2beta2"
+ corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
@@ -37,7 +39,7 @@ var (
)
func init() {
- groupVersion := schema.GroupVersion{Group: "starrocks.com", Version: "v1alpha1"}
+ groupVersion := schema.GroupVersion{Group: "starrocks.com", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
schemeBuilder := &scheme.Builder{GroupVersion: groupVersion}
@@ -68,35 +70,227 @@ func Test_DeleteAutoscaler(t *testing.T) {
},
}
- k8sclient := NewFakeClient(sch, &v1autoscaler, &v2autoscaler, &v2beta2Autoscaler)
- // confirm the v1autoscaler is exist.
+ k8sClient := NewFakeClient(sch, &v1autoscaler, &v2autoscaler, &v2beta2Autoscaler)
+ // confirm the v1 autoscaler exists.
var cv1autoscaler v1.HorizontalPodAutoscaler
- cerr := k8sclient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &cv1autoscaler)
+ cerr := k8sClient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &cv1autoscaler)
require.Equal(t, nil, cerr)
require.Equal(t, "test", cv1autoscaler.Name)
- delerr := DeleteAutoscaler(context.Background(), k8sclient, "default", "test", srapi.AutoScalerV1)
+ delerr := DeleteAutoscaler(context.Background(), k8sClient, "default", "test", srapi.AutoScalerV1)
require.Equal(t, nil, delerr)
var ev1autoscaler v1.HorizontalPodAutoscaler
- geterr := k8sclient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &ev1autoscaler)
+ geterr := k8sClient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &ev1autoscaler)
require.True(t, apierrors.IsNotFound(geterr))
var cv2autoscaler v2.HorizontalPodAutoscaler
- cerr = k8sclient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &cv2autoscaler)
+ cerr = k8sClient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &cv2autoscaler)
require.Equal(t, nil, cerr)
require.Equal(t, "test", v2autoscaler.Name)
- delerr = DeleteAutoscaler(context.Background(), k8sclient, "default", "test", srapi.AutoScalerV2)
+ delerr = DeleteAutoscaler(context.Background(), k8sClient, "default", "test", srapi.AutoScalerV2)
require.Equal(t, nil, delerr)
var ev2autoscaler v2.HorizontalPodAutoscaler
- geterr = k8sclient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &ev2autoscaler)
+ geterr = k8sClient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &ev2autoscaler)
require.True(t, apierrors.IsNotFound(geterr))
var cv2beta2autoscaler v2beta2.HorizontalPodAutoscaler
- cerr = k8sclient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &cv2beta2autoscaler)
+ cerr = k8sClient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &cv2beta2autoscaler)
require.Equal(t, nil, cerr)
require.Equal(t, "test", cv2beta2autoscaler.Name)
- delerr = DeleteAutoscaler(context.Background(), k8sclient, "default", "test", srapi.AutoScalerV2Beta2)
+ delerr = DeleteAutoscaler(context.Background(), k8sClient, "default", "test", srapi.AutoScalerV2Beta2)
require.Equal(t, nil, delerr)
var ev2beta2autoscaler v2beta2.HorizontalPodAutoscaler
- geterr = k8sclient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &ev2beta2autoscaler)
+ geterr = k8sClient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, &ev2beta2autoscaler)
require.True(t, apierrors.IsNotFound(geterr))
}
+
+func Test_getValueFromConfigmap(t *testing.T) {
+ type args struct {
+ k8sClient client.Client
+ namespace string
+ name string
+ key string
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ wantErr bool
+ }{
+ {
+ name: "get value from configmap",
+ args: args{
+ k8sClient: NewFakeClient(sch, &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ Data: map[string]string{
+ "file.txt": "hell world",
+ },
+ }),
+ namespace: "default",
+ name: "test",
+ key: "file.txt",
+ },
+ want: "hell world",
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := getValueFromConfigmap(tt.args.k8sClient, tt.args.namespace, tt.args.name, tt.args.key)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("getValueFromConfigmap() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("getValueFromConfigmap() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_getValueFromSecret(t *testing.T) {
+ type args struct {
+ k8sClient client.Client
+ namespace string
+ name string
+ key string
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ wantErr bool
+ }{
+ {
+ name: "get value from secret",
+ args: args{
+ k8sClient: NewFakeClient(sch, &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ Data: map[string][]byte{
+ "file.txt": []byte("hell world"),
+ },
+ }),
+ namespace: "default",
+ name: "test",
+ key: "file.txt",
+ },
+ want: "hell world",
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := getValueFromSecret(tt.args.k8sClient, tt.args.namespace, tt.args.name, tt.args.key)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("getValueFromSecret() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("getValueFromSecret() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestGetEnvVarValue(t *testing.T) {
+ type args struct {
+ k8sClient client.Client
+ namespace string
+ envVar corev1.EnvVar
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ wantErr bool
+ }{
+ {
+ name: "get value",
+ args: args{
+ k8sClient: NewFakeClient(sch),
+ namespace: "default",
+ envVar: corev1.EnvVar{
+ Name: "test",
+ Value: "hw",
+ },
+ },
+ want: "hw",
+ wantErr: false,
+ },
+ {
+ name: "get value from configmap",
+ args: args{
+ k8sClient: NewFakeClient(sch,
+ &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ Data: map[string]string{
+ "configmap": "hello",
+ },
+ }),
+ namespace: "default",
+ envVar: corev1.EnvVar{
+ Name: "test",
+ ValueFrom: &corev1.EnvVarSource{
+ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: "configmap",
+ },
+ },
+ },
+ },
+ want: "hello",
+ wantErr: false,
+ },
+ {
+ name: "get value from secret",
+ args: args{
+ k8sClient: NewFakeClient(sch,
+ &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ Data: map[string][]byte{
+ "secret": []byte("world"),
+ },
+ }),
+ namespace: "default",
+ envVar: corev1.EnvVar{
+ Name: "test",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test",
+ },
+ Key: "secret",
+ },
+ },
+ },
+ },
+ want: "world",
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := GetEnvVarValue(tt.args.k8sClient, tt.args.namespace, tt.args.envVar)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("GetEnvVarValue() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("GetEnvVarValue() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/k8sutils/templates/object/meta.go b/pkg/k8sutils/templates/object/meta.go
new file mode 100644
index 00000000..36c781a7
--- /dev/null
+++ b/pkg/k8sutils/templates/object/meta.go
@@ -0,0 +1,53 @@
+package object
+
+import (
+ srapi "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ StarRocksClusterKind = "StarRocksCluster"
+ StarRocksWarehouseKind = "StarRocksWarehouse"
+)
+
+// StarRocksObject is a wrapper of metav1.TypeMeta and metav1.ObjectMeta for StarRocksCluster and StarRocksWarehouse.
+type StarRocksObject struct {
+ *metav1.TypeMeta
+ *metav1.ObjectMeta
+
+ // ClusterName is the name of StarRocksCluster.
+ ClusterName string
+
+ // Kind is StarRocksCluster or StarRocksWarehouse.
+ // The reason we need this field is that we cannot be sure TypeMeta.Kind is filled.
+ Kind string
+
+ // AliasName represents the prefix of subresource names for the cn component. When the name of a
+ // StarRocksWarehouse is the same as the name of a StarRocksCluster, the operator must avoid creating
+ // StatefulSets, Services, etc. with the same name.
+ AliasName string
+}
+
+func NewFromCluster(cluster *srapi.StarRocksCluster) StarRocksObject {
+ return StarRocksObject{
+ TypeMeta: &cluster.TypeMeta,
+ ObjectMeta: &cluster.ObjectMeta,
+ ClusterName: cluster.Name,
+ Kind: StarRocksClusterKind,
+ AliasName: cluster.Name,
+ }
+}
+
+func NewFromWarehouse(warehouse *srapi.StarRocksWarehouse) StarRocksObject {
+ return StarRocksObject{
+ TypeMeta: &warehouse.TypeMeta,
+ ObjectMeta: &warehouse.ObjectMeta,
+ ClusterName: warehouse.Spec.StarRocksCluster,
+ Kind: StarRocksWarehouseKind,
+ AliasName: GetAliasName(warehouse.Name), // add a suffix to avoid name conflict with cluster
+ }
+}
+
+func GetAliasName(warehouseName string) string {
+ return warehouseName + "-warehouse"
+}
diff --git a/pkg/k8sutils/templates/pod/spec.go b/pkg/k8sutils/templates/pod/spec.go
index 40e68304..9516fc27 100644
--- a/pkg/k8sutils/templates/pod/spec.go
+++ b/pkg/k8sutils/templates/pod/spec.go
@@ -387,7 +387,7 @@ func Ports(spec v1.SpecInterface, config map[string]interface{}) []corev1.Contai
return ports
}
-func Spec(spec v1.SpecInterface, defaultServiceAccount string, container corev1.Container, volumes []corev1.Volume) corev1.PodSpec {
+func Spec(spec v1.SpecInterface, container corev1.Container, volumes []corev1.Volume) corev1.PodSpec {
podSpec := corev1.PodSpec{
Containers: []corev1.Container{container},
Volumes: volumes,
@@ -401,9 +401,6 @@ func Spec(spec v1.SpecInterface, defaultServiceAccount string, container corev1.
SchedulerName: spec.GetSchedulerName(),
AutomountServiceAccountToken: func() *bool { b := false; return &b }(),
}
- if podSpec.ServiceAccountName == "" {
- podSpec.ServiceAccountName = defaultServiceAccount
- }
return podSpec
}
diff --git a/pkg/k8sutils/templates/pod/spec_test.go b/pkg/k8sutils/templates/pod/spec_test.go
index 09c51829..b6ad8ba2 100644
--- a/pkg/k8sutils/templates/pod/spec_test.go
+++ b/pkg/k8sutils/templates/pod/spec_test.go
@@ -698,10 +698,9 @@ func TestEnvs(t *testing.T) {
func TestSpec(t *testing.T) {
type args struct {
- spec v1.SpecInterface
- defaultServiceAccount string
- container corev1.Container
- volumes []corev1.Volume
+ spec v1.SpecInterface
+ container corev1.Container
+ volumes []corev1.Volume
}
tests := []struct {
name string
@@ -718,9 +717,8 @@ func TestSpec(t *testing.T) {
},
},
},
- defaultServiceAccount: "default",
- container: corev1.Container{},
- volumes: nil,
+ container: corev1.Container{},
+ volumes: nil,
},
want: corev1.PodSpec{
Containers: []corev1.Container{{}},
@@ -732,14 +730,12 @@ func TestSpec(t *testing.T) {
{
name: "test service account name 2 in spec",
args: args{
- spec: &v1.StarRocksFeSpec{},
- defaultServiceAccount: "default",
- container: corev1.Container{},
- volumes: nil,
+ spec: &v1.StarRocksFeSpec{},
+ container: corev1.Container{},
+ volumes: nil,
},
want: corev1.PodSpec{
Containers: []corev1.Container{{}},
- ServiceAccountName: "default",
TerminationGracePeriodSeconds: rutils.GetInt64ptr(int64(120)),
AutomountServiceAccountToken: func() *bool { b := false; return &b }(),
},
@@ -747,7 +743,7 @@ func TestSpec(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if got := Spec(tt.args.spec, tt.args.defaultServiceAccount, tt.args.container, tt.args.volumes); !reflect.DeepEqual(got, tt.want) {
+ if got := Spec(tt.args.spec, tt.args.container, tt.args.volumes); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Spec() = %#v, want %#v", got, tt.want)
}
})
diff --git a/pkg/k8sutils/templates/statefulset/spec.go b/pkg/k8sutils/templates/statefulset/spec.go
index 0ed88fe5..b5b69f7a 100644
--- a/pkg/k8sutils/templates/statefulset/spec.go
+++ b/pkg/k8sutils/templates/statefulset/spec.go
@@ -18,6 +18,7 @@ import (
v1 "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
rutils "github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/resource_utils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/load"
+ srobject "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/service"
appv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -25,6 +26,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
+const STARROCKS_WAREHOUSE_FINALIZER = "starrocks.com.starrockswarehouse/protection"
+
func PVCList(volumes []v1.StorageVolume) []corev1.PersistentVolumeClaim {
var pvcs []corev1.PersistentVolumeClaim
for _, vm := range volumes {
@@ -47,22 +50,22 @@ func PVCList(volumes []v1.StorageVolume) []corev1.PersistentVolumeClaim {
}
// MakeStatefulset statefulset
-func MakeStatefulset(cluster *v1.StarRocksCluster, spec v1.SpecInterface, podTemplateSpec corev1.PodTemplateSpec) appv1.StatefulSet {
+func MakeStatefulset(object srobject.StarRocksObject, spec v1.SpecInterface, podTemplateSpec corev1.PodTemplateSpec) appv1.StatefulSet {
const defaultRollingUpdateStartPod int32 = 0
// TODO: statefulset only allow update 'replicas', 'template', 'updateStrategy'
- or := metav1.NewControllerRef(cluster, cluster.GroupVersionKind())
+ or := metav1.NewControllerRef(object, object.GroupVersionKind())
st := appv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
- Name: load.Name(cluster.Name, spec),
- Namespace: cluster.Namespace,
+ Name: load.Name(object.AliasName, spec),
+ Namespace: object.Namespace,
Annotations: load.Annotations(),
- Labels: load.Labels(cluster.Name, spec),
+ Labels: load.Labels(object.AliasName, spec),
OwnerReferences: []metav1.OwnerReference{*or},
},
Spec: appv1.StatefulSetSpec{
Replicas: spec.GetReplicas(),
Selector: &metav1.LabelSelector{
- MatchLabels: load.Selector(cluster.Name, spec),
+ MatchLabels: load.Selector(object.AliasName, spec),
},
UpdateStrategy: appv1.StatefulSetUpdateStrategy{
Type: appv1.RollingUpdateStatefulSetStrategyType,
@@ -71,11 +74,17 @@ func MakeStatefulset(cluster *v1.StarRocksCluster, spec v1.SpecInterface, podTem
},
},
Template: podTemplateSpec,
- ServiceName: service.SearchServiceName(cluster.Name, spec),
+ ServiceName: service.SearchServiceName(object.AliasName, spec),
VolumeClaimTemplates: PVCList(spec.GetStorageVolumes()),
PodManagementPolicy: appv1.ParallelPodManagement,
},
}
+ // When the Warehouse CR is deleted, the operator needs to read some environment variables from the
+ // StatefulSet in order to execute the statement that drops the warehouse.
+ if object.Kind == srobject.StarRocksWarehouseKind {
+ st.Finalizers = append(st.Finalizers, STARROCKS_WAREHOUSE_FINALIZER)
+ }
+
return st
}
diff --git a/pkg/k8sutils/templates/statefulset/spec_test.go b/pkg/k8sutils/templates/statefulset/spec_test.go
index ce0689e1..f6e072c7 100644
--- a/pkg/k8sutils/templates/statefulset/spec_test.go
+++ b/pkg/k8sutils/templates/statefulset/spec_test.go
@@ -21,6 +21,7 @@ import (
v1 "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/resource_utils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/load"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -169,7 +170,7 @@ func TestMakeStatefulset(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got := MakeStatefulset(&tt.args.cluster, tt.args.spec, corev1.PodTemplateSpec{})
+ got := MakeStatefulset(object.NewFromCluster(&tt.args.cluster), tt.args.spec, corev1.PodTemplateSpec{})
got.OwnerReferences = nil
if !reflect.DeepEqual(got.ObjectMeta, tt.want.ObjectMeta) {
t.Errorf("MakeStatefulset ObjectMeta = %v, want %v", got.ObjectMeta, tt.want.ObjectMeta)
diff --git a/pkg/starrockscluster_controller.go b/pkg/starrockscluster_controller.go
index 4dfeccba..2396835c 100644
--- a/pkg/starrockscluster_controller.go
+++ b/pkg/starrockscluster_controller.go
@@ -18,7 +18,6 @@ package pkg
import (
"context"
- "os"
srapi "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/hash"
@@ -40,10 +39,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-func init() {
- Controllers = append(Controllers, &StarRocksClusterReconciler{})
-}
-
var (
name = "starrockscluster-controller"
feControllerName = "fe-controller"
@@ -56,7 +51,7 @@ var (
type StarRocksClusterReconciler struct {
client.Client
Recorder record.EventRecorder
- Scs map[string]sub_controller.SubController
+ Scs map[string]sub_controller.ClusterSubController
}
// +kubebuilder:rbac:groups=starrocks.com,resources=starrocksclusters,verbs=get;list;watch;create;update;patch;delete
@@ -75,11 +70,6 @@ type StarRocksClusterReconciler struct {
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
-// TODO(user): Modify the Reconcile function to compare the state specified by
-// the StarRocksCluster object against the actual cluster state, and then
-// perform operations to make the cluster state reflect the state specified by
-// the user.
-//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.1/pkg/reconcile
func (r *StarRocksClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
@@ -108,10 +98,10 @@ func (r *StarRocksClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
return ctrl.Result{}, nil
}
- // subControllers reconcile for create or update sub resource.
+ // subControllers reconcile for create or update component.
for _, rc := range r.Scs {
- if err := rc.Sync(ctx, src); err != nil {
- klog.Errorf("StarRocksClusterReconciler reconcile sub resource reconcile failed, "+
+ if err := rc.SyncCluster(ctx, src); err != nil {
+ klog.Errorf("StarRocksClusterReconciler reconcile component failed, "+
"namespace=%v, name=%v, controller=%v, error=%v", src.Namespace, src.Name, rc.GetControllerName(), err)
return requeueIfError(err)
}
@@ -124,7 +114,7 @@ func (r *StarRocksClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
for _, rc := range r.Scs {
// update component status.
- if err := rc.UpdateStatus(src); err != nil {
+ if err := rc.UpdateClusterStatus(src); err != nil {
klog.Infof("StarRocksClusterReconciler reconcile update component %s status failed.err=%s\n", rc.GetControllerName(), err.Error())
return requeueIfError(err)
}
@@ -164,7 +154,7 @@ func (r *StarRocksClusterReconciler) PatchStarRocksCluster(ctx context.Context,
})
}
-// UpdateStarRocksCluster udpate the starrockscluster metadata, spec.
+// UpdateStarRocksCluster update the starrockscluster metadata, spec.
func (r *StarRocksClusterReconciler) UpdateStarRocksCluster(ctx context.Context, src *srapi.StarRocksCluster) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var esrc srapi.StarRocksCluster
@@ -195,7 +185,7 @@ func (r *StarRocksClusterReconciler) hashStarRocksCluster(src *srapi.StarRocksCl
func (r *StarRocksClusterReconciler) reconcileStatus(ctx context.Context, src *srapi.StarRocksCluster) {
// calculate the status of starrocks cluster by subresource's status.
- // clear resources when sub resource deleted. example: deployed fe,be,cn, when cn spec is deleted we should delete cn resources.
+ // clear resources when component deleted. example: deployed fe,be,cn, when cn spec is deleted we should delete cn resources.
for _, rc := range r.Scs {
if err := rc.ClearResources(ctx, src); err != nil {
klog.Errorf("StarRocksClusterReconciler reconcile clear resource failed, "+
@@ -203,30 +193,25 @@ func (r *StarRocksClusterReconciler) reconcileStatus(ctx context.Context, src *s
}
}
- smap := make(map[srapi.ClusterPhase]bool)
src.Status.Phase = srapi.ClusterRunning
- func() {
- feStatus := src.Status.StarRocksFeStatus
- if feStatus != nil && feStatus.Phase == srapi.ComponentReconciling {
- smap[srapi.ClusterPending] = true
- } else if feStatus != nil && feStatus.Phase == srapi.ComponentFailed {
- smap[srapi.ClusterFailed] = true
+ phase := GetPhaseFromComponent(&src.Status.StarRocksFeStatus.StarRocksComponentStatus)
+ if phase != "" {
+ src.Status.Phase = phase
+ return
+ }
+ if src.Status.StarRocksBeStatus != nil {
+ phase = GetPhaseFromComponent(&src.Status.StarRocksBeStatus.StarRocksComponentStatus)
+ if phase != "" {
+ src.Status.Phase = phase
+ return
}
- }()
-
- func() {
- cnStatus := src.Status.StarRocksCnStatus
- if cnStatus != nil && cnStatus.Phase == srapi.ComponentReconciling {
- smap[srapi.ClusterPending] = true
- } else if cnStatus != nil && cnStatus.Phase == srapi.ComponentFailed {
- smap[srapi.ClusterFailed] = true
+ }
+ if src.Status.StarRocksCnStatus != nil {
+ phase = GetPhaseFromComponent(&src.Status.StarRocksCnStatus.StarRocksComponentStatus)
+ if phase != "" {
+ src.Status.Phase = phase
+ return
}
- }()
-
- if _, ok := smap[srapi.ClusterPending]; ok {
- src.Status.Phase = srapi.ClusterPending
- } else if _, ok := smap[srapi.ClusterFailed]; ok {
- src.Status.Phase = srapi.ClusterFailed
}
}
@@ -243,9 +228,8 @@ func (r *StarRocksClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
Complete(r)
}
-// Init initial the StarRocksClusterReconciler for reconcile.
-func (r *StarRocksClusterReconciler) Init(mgr ctrl.Manager) {
- subcs := make(map[string]sub_controller.SubController)
+func SetupClusterReconciler(mgr ctrl.Manager) error {
+ subcs := make(map[string]sub_controller.ClusterSubController)
feController := fe.New(mgr.GetClient())
subcs[feControllerName] = feController
cnController := cn.New(mgr.GetClient())
@@ -255,14 +239,17 @@ func (r *StarRocksClusterReconciler) Init(mgr ctrl.Manager) {
feProxyController := feproxy.New(mgr.GetClient())
subcs[feProxyControllerName] = feProxyController
- if err := (&StarRocksClusterReconciler{
+ reconciler := &StarRocksClusterReconciler{
Client: mgr.GetClient(),
Recorder: mgr.GetEventRecorderFor(name),
Scs: subcs,
- }).SetupWithManager(mgr); err != nil {
+ }
+
+ if err := reconciler.SetupWithManager(mgr); err != nil {
klog.Error(err, " unable to create controller ", "controller ", "StarRocksCluster ")
- os.Exit(1)
+ return err
}
+ return nil
}
func requeueIfError(err error) (ctrl.Result, error) {
diff --git a/pkg/starrockscluster_controller_test.go b/pkg/starrockscluster_controller_test.go
index b67419b2..61dcd1fe 100644
--- a/pkg/starrockscluster_controller_test.go
+++ b/pkg/starrockscluster_controller_test.go
@@ -43,7 +43,7 @@ func newStarRocksClusterController(objects ...runtime.Object) *StarRocksClusterR
srcController.Client = k8sutils.NewFakeClient(Scheme, objects...)
fc := fe.New(srcController.Client)
cc := cn.New(srcController.Client)
- srcController.Scs = make(map[string]sub_controller.SubController)
+ srcController.Scs = make(map[string]sub_controller.ClusterSubController)
srcController.Scs[feControllerName] = fc
srcController.Scs[cnControllerName] = cc
return srcController
diff --git a/pkg/starrockswarehouse_controller.go b/pkg/starrockswarehouse_controller.go
new file mode 100644
index 00000000..fc03ed3b
--- /dev/null
+++ b/pkg/starrockswarehouse_controller.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2021-present, StarRocks Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pkg
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ srapi "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/sub_controller"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/sub_controller/cn"
+ appv1 "k8s.io/api/apps/v1"
+ v2 "k8s.io/api/autoscaling/v2"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/client-go/util/retry"
+ "k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// StarRocksWarehouseReconciler reconciles a StarRocksWarehouse object
+type StarRocksWarehouseReconciler struct {
+ client.Client
+ recorder record.EventRecorder
+ subControllers []sub_controller.WarehouseSubController
+}
+
+// +kubebuilder:rbac:groups=starrocks.com,resources=starrockswarehouses,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=starrocks.com,resources=starrockswarehouses/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=starrocks.com,resources=starrockswarehouses/finalizers,verbs=update
+// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
+// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch
+// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="core",resources=endpoints,verbs=get;watch;list
+// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.1/pkg/reconcile
+func (r *StarRocksWarehouseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ klog.Infof("StarRocksWarehouseReconciler reconcile the StarRocksWarehouse CR, namespace=%v, name=%v", req.Namespace, req.Name)
+
+ klog.Infof("get StarRocksWarehouse CR, namespace=%v, name=%v", req.Namespace, req.Name)
+ warehouse := &srapi.StarRocksWarehouse{}
+ err := r.Client.Get(ctx, req.NamespacedName, warehouse)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ klog.Infof("StarRocksWarehouse CR is not found, begin to clear warehouse, namespace=%v, name=%v",
+ req.Namespace, req.Name)
+ for _, controller := range r.subControllers {
+ if err = controller.ClearWarehouse(ctx, req.Namespace, req.Name); err != nil {
+ klog.Errorf("failed to clear warehouse %s/%s, error=%v", req.Namespace, req.Name, err)
+ }
+ }
+ return ctrl.Result{}, nil
+ }
+ klog.Errorf("failed to get StarRocksWarehouse CR, namespace=%v, name=%v, error=%v", req.Namespace, req.Name, err)
+ return ctrl.Result{}, err
+ }
+
+ if warehouse.Status.WarehouseComponentStatus == nil {
+ warehouse.Status.WarehouseComponentStatus = &srapi.StarRocksCnStatus{
+ StarRocksComponentStatus: srapi.StarRocksComponentStatus{
+ Phase: srapi.ComponentReconciling,
+ },
+ }
+ }
+
+ for _, controller := range r.subControllers {
+ klog.Infof("StarRocksWarehouseReconciler reconcile component, namespace=%v, name=%v, controller=%v",
+ warehouse.Namespace, warehouse.Name, controller.GetControllerName())
+ if err := controller.SyncWarehouse(ctx, warehouse); err != nil {
+ warehouse.Status.Phase = srapi.ComponentFailed
+ if errors.Is(err, cn.SpecMissingError) {
+ reason := fmt.Sprintf("the spec part is invalid %s/%s", warehouse.Namespace, warehouse.Name)
+ warehouse.Status.Reason = reason
+ klog.Info(reason)
+ return ctrl.Result{}, nil
+ } else if errors.Is(err, cn.StarRocksClusterMissingError) {
+ reason := fmt.Sprintf("StarRocksCluster %s/%s not found for %s/%s",
+ warehouse.Namespace, warehouse.Spec.StarRocksCluster, warehouse.Namespace, warehouse.Name)
+ warehouse.Status.Reason = reason
+ klog.Infof(reason)
+ return ctrl.Result{}, nil
+ } else if errors.Is(err, cn.FeNotReadyError) {
+ klog.Infof("StarRocksFe is not ready, %s/%s", warehouse.Namespace, warehouse.Name)
+ return ctrl.Result{}, nil
+ } else if errors.Is(err, cn.GetFeFeatureInfoError) {
+ reason := fmt.Sprintf("failed to get FE feature or FE does not support multi-warehouse %s/%s",
+ warehouse.Namespace, warehouse.Name)
+ warehouse.Status.Reason = reason
+ klog.Info(reason)
+ return ctrl.Result{}, nil
+ }
+ reason := fmt.Sprintf("failed to reconcile component, namespace=%v, name=%v, controller=%v, error=%v",
+ warehouse.Namespace, warehouse.Name, controller.GetControllerName(), err)
+ warehouse.Status.Reason = reason
+ klog.Info(err)
+ return ctrl.Result{}, err
+ }
+ }
+
+ for _, controller := range r.subControllers {
+ klog.Infof("StarRocksWarehouseReconciler update component status, namespace=%v, name=%v, controller=%v",
+ warehouse.Namespace, warehouse.Name, controller.GetControllerName())
+ if err := controller.UpdateWarehouseStatus(warehouse); err != nil {
+ klog.Infof("failed to reconcile component, namespace=%v, name=%v, controller=%v, error=%v",
+ warehouse.Namespace, warehouse.Name, controller.GetControllerName(), err)
+ return requeueIfError(err)
+ }
+ }
+
+ return ctrl.Result{}, r.UpdateStarRocksWarehouseStatus(ctx, warehouse)
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *StarRocksWarehouseReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&srapi.StarRocksWarehouse{}).
+ Owns(&appv1.StatefulSet{}).
+ Owns(&corev1.ConfigMap{}).
+ Owns(&corev1.Service{}).
+ Owns(&v2.HorizontalPodAutoscaler{}).
+ Complete(r)
+}
+
+func SetupWarehouseReconciler(mgr ctrl.Manager) error {
+ // check whether the StarRocksWarehouse CRD exists
+ if err := mgr.GetAPIReader().List(context.Background(), &srapi.StarRocksWarehouseList{}); err != nil {
+ if meta.IsNoMatchError(err) {
+ klog.Infof("StarRocksWarehouse CRD is not found, skip StarRocksWarehouseReconciler")
+ return nil
+ }
+ return err
+ }
+
+ reconciler := &StarRocksWarehouseReconciler{
+ Client: mgr.GetClient(),
+ recorder: mgr.GetEventRecorderFor(name),
+ subControllers: []sub_controller.WarehouseSubController{cn.New(mgr.GetClient())},
+ }
+ if err := reconciler.SetupWithManager(mgr); err != nil {
+ klog.Error(err, "failed to setup StarRocksWarehouseReconciler")
+ return err
+ }
+ return nil
+}
+
+// UpdateStarRocksWarehouseStatus updates the status of the warehouse.
+func (r *StarRocksWarehouseReconciler) UpdateStarRocksWarehouseStatus(ctx context.Context, warehouse *srapi.StarRocksWarehouse) error {
+ return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ actualWarehouse := &srapi.StarRocksWarehouse{}
+ if err := r.Client.Get(ctx, types.NamespacedName{Namespace: warehouse.Namespace, Name: warehouse.Name}, actualWarehouse); err != nil {
+ return err
+ }
+ actualWarehouse.Status = warehouse.Status
+ return r.Client.Status().Update(ctx, actualWarehouse)
+ })
+}
diff --git a/pkg/starrockswarehouse_controller_test.go b/pkg/starrockswarehouse_controller_test.go
new file mode 100644
index 00000000..fc2233a2
--- /dev/null
+++ b/pkg/starrockswarehouse_controller_test.go
@@ -0,0 +1,322 @@
+package pkg
+
+import (
+ "context"
+ "reflect"
+ "testing"
+
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/sub_controller"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/sub_controller/cn"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
+ controllerruntime "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func newStarRocksWarehouseController(objects ...runtime.Object) *StarRocksWarehouseReconciler {
+ client := k8sutils.NewFakeClient(Scheme, objects...)
+ warehouseController := &StarRocksWarehouseReconciler{
+ recorder: record.NewFakeRecorder(10),
+ Client: client,
+ subControllers: []sub_controller.WarehouseSubController{
+ cn.New(client),
+ },
+ }
+ return warehouseController
+}
+
+func TestStarRocksWarehouseReconciler_Reconcile(t *testing.T) {
+ type fields struct {
+ reconciler *StarRocksWarehouseReconciler
+ }
+ type args struct {
+ ctx context.Context
+ req controllerruntime.Request
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ want controllerruntime.Result
+ wantErr bool
+ }{
+ {
+ name: "test warehouse reconcile without cr",
+ fields: fields{
+ reconciler: newStarRocksWarehouseController(),
+ },
+ args: args{
+ ctx: context.TODO(),
+ req: controllerruntime.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "test",
+ Namespace: "test",
+ },
+ },
+ },
+ want: controllerruntime.Result{},
+ wantErr: false,
+ },
+ {
+ name: "test warehouse reconcile without specified cluster",
+ fields: fields{
+ reconciler: newStarRocksWarehouseController(
+ &v1.StarRocksWarehouse{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: v1.StarRocksWarehouseSpec{
+ Template: &v1.WarehouseComponentSpec{},
+ },
+ }),
+ },
+ args: args{
+ ctx: context.TODO(),
+ req: controllerruntime.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "test",
+ Namespace: "test",
+ },
+ },
+ },
+ want: controllerruntime.Result{},
+ wantErr: false,
+ },
+ {
+ name: "test warehouse reconcile without cluster",
+ fields: fields{
+ reconciler: newStarRocksWarehouseController(
+ &v1.StarRocksWarehouse{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: v1.StarRocksWarehouseSpec{
+ StarRocksCluster: "cluster",
+ },
+ }),
+ },
+ args: args{
+ ctx: context.TODO(),
+ req: controllerruntime.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "test",
+ Namespace: "test",
+ },
+ },
+ },
+ want: controllerruntime.Result{},
+ wantErr: false,
+ },
+ {
+ name: "test warehouse reconcile with not ready cluster",
+ fields: fields{
+ reconciler: newStarRocksWarehouseController(
+ &v1.StarRocksWarehouse{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: v1.StarRocksWarehouseSpec{
+ StarRocksCluster: "test",
+ Template: &v1.WarehouseComponentSpec{},
+ },
+ },
+ &v1.StarRocksCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: v1.StarRocksClusterSpec{
+ StarRocksFeSpec: &v1.StarRocksFeSpec{
+ StarRocksComponentSpec: v1.StarRocksComponentSpec{
+ StarRocksLoadSpec: v1.StarRocksLoadSpec{
+ ConfigMapInfo: v1.ConfigMapInfo{
+ ConfigMapName: "fe-configmap",
+ ResolveKey: "fe.conf",
+ },
+ },
+ },
+ },
+ },
+ },
+ &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fe-configmap",
+ Namespace: "test",
+ },
+ Data: map[string]string{
+ "fe.conf": "run_mode = shared_data",
+ },
+ },
+ &corev1.Endpoints{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-fe-service",
+ Namespace: "test",
+ },
+ Subsets: []corev1.EndpointSubset{{
+ // no ready address
+ Addresses: []corev1.EndpointAddress{},
+ }},
+ },
+ ),
+ },
+ args: args{
+ ctx: context.TODO(),
+ req: controllerruntime.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "test",
+ Namespace: "test",
+ },
+ },
+ },
+ want: controllerruntime.Result{},
+ wantErr: false,
+ },
+ {
+ name: "test warehouse reconcile",
+ fields: fields{
+ reconciler: newStarRocksWarehouseController(
+ &v1.StarRocksWarehouse{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: v1.StarRocksWarehouseSpec{
+ StarRocksCluster: "test",
+ Template: &v1.WarehouseComponentSpec{},
+ },
+ },
+ &v1.StarRocksCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: v1.StarRocksClusterSpec{
+ StarRocksFeSpec: &v1.StarRocksFeSpec{
+ StarRocksComponentSpec: v1.StarRocksComponentSpec{
+ StarRocksLoadSpec: v1.StarRocksLoadSpec{
+ ConfigMapInfo: v1.ConfigMapInfo{
+ ConfigMapName: "fe-configmap",
+ ResolveKey: "fe.conf",
+ },
+ },
+ },
+ },
+ },
+ },
+ &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fe-configmap",
+ Namespace: "test",
+ },
+ Data: map[string]string{
+ "fe.conf": "run_mode = shared_data",
+ },
+ },
+ &corev1.Endpoints{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-fe-service",
+ Namespace: "test",
+ },
+ Subsets: []corev1.EndpointSubset{{
+ Addresses: []corev1.EndpointAddress{{IP: "127.0.0.1"}},
+ }},
+ },
+ ),
+ },
+ args: args{
+ ctx: context.TODO(),
+ req: controllerruntime.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "test",
+ Namespace: "test",
+ },
+ },
+ },
+ want: controllerruntime.Result{},
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := tt.fields.reconciler.Reconcile(tt.args.ctx, tt.args.req)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("Reconcile() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Reconcile() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestStarRocksWarehouseReconciler_SetupWithManager(t *testing.T) {
+ type fields struct {
+ Client client.Client
+ recorder record.EventRecorder
+ subControllers []sub_controller.WarehouseSubController
+ }
+ type args struct {
+ mgr controllerruntime.Manager
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ wantErr bool
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ r := &StarRocksWarehouseReconciler{
+ Client: tt.fields.Client,
+ recorder: tt.fields.recorder,
+ subControllers: tt.fields.subControllers,
+ }
+ if err := r.SetupWithManager(tt.args.mgr); (err != nil) != tt.wantErr {
+ t.Errorf("SetupWithManager() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestStarRocksWarehouseReconciler_UpdateStarRocksWarehouseStatus(t *testing.T) {
+ type fields struct {
+ Client client.Client
+ recorder record.EventRecorder
+ subControllers []sub_controller.WarehouseSubController
+ }
+ type args struct {
+ ctx context.Context
+ warehouse *v1.StarRocksWarehouse
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ wantErr bool
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ r := &StarRocksWarehouseReconciler{
+ Client: tt.fields.Client,
+ recorder: tt.fields.recorder,
+ subControllers: tt.fields.subControllers,
+ }
+ if err := r.UpdateStarRocksWarehouseStatus(tt.args.ctx, tt.args.warehouse); (err != nil) != tt.wantErr {
+ t.Errorf("UpdateStarRocksWarehouseStatus() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/pkg/sub_controller/be/be_controller.go b/pkg/sub_controller/be/be_controller.go
index 7ad690ee..a3d1f5fc 100644
--- a/pkg/sub_controller/be/be_controller.go
+++ b/pkg/sub_controller/be/be_controller.go
@@ -24,6 +24,7 @@ import (
rutils "github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/resource_utils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/load"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/pod"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/service"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/statefulset"
@@ -52,7 +53,7 @@ func (be *BeController) GetControllerName() string {
return "beController"
}
-func (be *BeController) Sync(ctx context.Context, src *srapi.StarRocksCluster) error {
+func (be *BeController) SyncCluster(ctx context.Context, src *srapi.StarRocksCluster) error {
if src.Spec.StarRocksBeSpec == nil {
if err := be.ClearResources(ctx, src); err != nil {
klog.Errorf("beController sync clearResource namespace=%s,srcName=%s, err=%s\n", src.Namespace, src.Name, err.Error())
@@ -62,7 +63,7 @@ func (be *BeController) Sync(ctx context.Context, src *srapi.StarRocksCluster) e
return nil
}
- if !fe.CheckFEOk(ctx, be.k8sClient, src) {
+ if !fe.CheckFEReady(ctx, be.k8sClient, src.Namespace, src.Name) {
return nil
}
@@ -78,17 +79,17 @@ func (be *BeController) Sync(ctx context.Context, src *srapi.StarRocksCluster) e
}
feconfig, _ := be.getFeConfig(ctx, &src.Spec.StarRocksFeSpec.ConfigMapInfo, src.Namespace)
- // annotation: add query port in cnconfig.
+ // add query port from fe config.
config[rutils.QUERY_PORT] = strconv.FormatInt(int64(rutils.GetPort(feconfig, rutils.QUERY_PORT)), 10)
// generate new be external service.
- externalsvc := rutils.BuildExternalService(src, service.ExternalServiceName(src.Name, beSpec), rutils.BeService, config,
- load.Selector(src.Name, beSpec), load.Labels(src.Name, beSpec))
+ externalsvc := rutils.BuildExternalService(object.NewFromCluster(src),
+ beSpec, config, load.Selector(src.Name, beSpec), load.Labels(src.Name, beSpec))
// generate internal fe service, update the status of cn on src.
internalService := be.generateInternalService(ctx, src, &externalsvc, config)
// create be statefulset.
podTemplateSpec := be.buildPodTemplate(src, config)
- st := statefulset.MakeStatefulset(src, beSpec, podTemplateSpec)
+ st := statefulset.MakeStatefulset(object.NewFromCluster(src), beSpec, podTemplateSpec)
// update the statefulset if feSpec be updated.
if err = k8sutils.ApplyStatefulSet(ctx, be.k8sClient, &st, func(new *appv1.StatefulSet, est *appv1.StatefulSet) bool {
@@ -117,8 +118,8 @@ func (be *BeController) Sync(ctx context.Context, src *srapi.StarRocksCluster) e
return err
}
-// UpdateStatus update the all resource status about be.
-func (be *BeController) UpdateStatus(src *srapi.StarRocksCluster) error {
+// UpdateClusterStatus update the all resource status about be.
+func (be *BeController) UpdateClusterStatus(src *srapi.StarRocksCluster) error {
// if spec is not exist, status is empty. but before clear status we must clear all resource about be used by ClearResources.
beSpec := src.Spec.StarRocksBeSpec
if beSpec == nil {
@@ -141,7 +142,7 @@ func (be *BeController) UpdateStatus(src *srapi.StarRocksCluster) error {
statefulSetName := load.Name(src.Name, beSpec)
if err := be.k8sClient.Get(context.Background(),
types.NamespacedName{Namespace: src.Namespace, Name: statefulSetName}, &st); apierrors.IsNotFound(err) {
- klog.Infof("BeController UpdateStatus the statefulset name=%s is not found.\n", statefulSetName)
+ klog.Infof("BeController UpdateClusterStatus the statefulset name=%s is not found.\n", statefulSetName)
return nil
} else if err != nil {
return err
diff --git a/pkg/sub_controller/be/be_controller_test.go b/pkg/sub_controller/be/be_controller_test.go
index dd3dcebd..ca56e777 100644
--- a/pkg/sub_controller/be/be_controller_test.go
+++ b/pkg/sub_controller/be/be_controller_test.go
@@ -42,7 +42,7 @@ var (
)
func init() {
- groupVersion := schema.GroupVersion{Group: "starrocks.com", Version: "v1alpha1"}
+ groupVersion := schema.GroupVersion{Group: "starrocks.com", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
schemeBuilder := &scheme.Builder{GroupVersion: groupVersion}
@@ -163,9 +163,9 @@ func Test_Sync(t *testing.T) {
}
bc := New(k8sutils.NewFakeClient(sch, src, &ep))
- err := bc.Sync(context.Background(), src)
+ err := bc.SyncCluster(context.Background(), src)
require.Equal(t, nil, err)
- err = bc.UpdateStatus(src)
+ err = bc.UpdateClusterStatus(src)
require.Equal(t, nil, err)
beStatus := src.Status.StarRocksBeStatus
require.Equal(t, beStatus.Phase, srapi.ComponentReconciling)
diff --git a/pkg/sub_controller/be/be_pod.go b/pkg/sub_controller/be/be_pod.go
index 819a5f10..0a1c6359 100644
--- a/pkg/sub_controller/be/be_pod.go
+++ b/pkg/sub_controller/be/be_pod.go
@@ -75,7 +75,7 @@ func (be *BeController) buildPodTemplate(src *srapi.StarRocksCluster, config map
vols, volumeMounts = pod.MountSecrets(vols, volumeMounts, beSpec.Secrets)
feExternalServiceName := service.ExternalServiceName(src.Name, src.Spec.StarRocksFeSpec)
- Envs := pod.Envs(src.Spec.StarRocksBeSpec, config, feExternalServiceName, src.Namespace, beSpec.BeEnvVars)
+ envs := pod.Envs(src.Spec.StarRocksBeSpec, config, feExternalServiceName, src.Namespace, beSpec.BeEnvVars)
webServerPort := rutils.GetPort(config, rutils.WEBSERVER_PORT)
beContainer := corev1.Container{
Name: srapi.DEFAULT_BE,
@@ -83,7 +83,7 @@ func (be *BeController) buildPodTemplate(src *srapi.StarRocksCluster, config map
Command: []string{"/opt/starrocks/be_entrypoint.sh"},
Args: []string{"$(FE_SERVICE_NAME)"},
Ports: pod.Ports(beSpec, config),
- Env: Envs,
+ Env: envs,
Resources: beSpec.ResourceRequirements,
ImagePullPolicy: corev1.PullIfNotPresent,
VolumeMounts: volumeMounts,
@@ -100,7 +100,7 @@ func (be *BeController) buildPodTemplate(src *srapi.StarRocksCluster, config map
})
}
- podSpec := pod.Spec(beSpec, src.Spec.ServiceAccount, beContainer, vols)
+ podSpec := pod.Spec(beSpec, beContainer, vols)
annotations := pod.Annotations(beSpec)
podSpec.SecurityContext = pod.PodSecurityContext(beSpec)
diff --git a/pkg/sub_controller/cn/autoscaler.go b/pkg/sub_controller/cn/autoscaler.go
deleted file mode 100644
index 9c81c5f9..00000000
--- a/pkg/sub_controller/cn/autoscaler.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
-Copyright 2021-present, StarRocks Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cn
-
-import (
- srapi "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
- rutils "github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/resource_utils"
- "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/load"
- appv1 "k8s.io/api/apps/v1"
-)
-
-func (cc *CnController) generateAutoScalerName(src *srapi.StarRocksCluster) string {
- return load.Name(src.Name, src.Spec.StarRocksCnSpec) + "-autoscaler"
-}
-
-func (cc *CnController) buildCnAutoscalerParams(scalerInfo srapi.AutoScalingPolicy, target *appv1.StatefulSet,
- src *srapi.StarRocksCluster) *rutils.PodAutoscalerParams {
- labels := rutils.Labels{}
- labels.AddLabel(target.Labels)
- labels.Add(srapi.ComponentLabelKey, "autoscaler")
-
- return &rutils.PodAutoscalerParams{
- Namespace: target.Namespace,
- Name: cc.generateAutoScalerName(src),
- Labels: labels,
- AutoscalerType: src.Spec.StarRocksCnSpec.AutoScalingPolicy.Version,
- TargetName: target.Name,
- // use src as ownerReference for reconciling on autoscaler updated.
- OwnerReferences: target.OwnerReferences,
- ScalerPolicy: &scalerInfo,
- }
-}
diff --git a/pkg/sub_controller/cn/cn_controller.go b/pkg/sub_controller/cn/cn_controller.go
index e20414ba..f176dcec 100644
--- a/pkg/sub_controller/cn/cn_controller.go
+++ b/pkg/sub_controller/cn/cn_controller.go
@@ -18,21 +18,28 @@ package cn
import (
"context"
+ "errors"
"fmt"
"strconv"
+ "strings"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/constant"
srapi "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/hash"
rutils "github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/resource_utils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/load"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/pod"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/service"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/statefulset"
subc "github.com/StarRocks/starrocks-kubernetes-operator/pkg/sub_controller"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/sub_controller/fe"
appv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/api/autoscaling/v1"
+ v2 "k8s.io/api/autoscaling/v2"
+ "k8s.io/api/autoscaling/v2beta2"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
@@ -55,7 +62,43 @@ func (cc *CnController) GetControllerName() string {
return "cnController"
}
-func (cc *CnController) Sync(ctx context.Context, src *srapi.StarRocksCluster) error {
+var SpecMissingError = errors.New("spec.template or spec.starRocksCluster is missing")
+var StarRocksClusterMissingError = errors.New("custom resource StarRocksCluster is missing")
+var FeNotReadyError = errors.New("component fe is not ready")
+var StarRocksClusterRunModeError = errors.New("StarRocks Cluster should run in shared_data mode")
+var GetFeFeatureInfoError = errors.New("failed to invoke FE /api/v2/feature or FE does not support multi-warehouse feature")
+
+func (cc *CnController) SyncWarehouse(ctx context.Context, warehouse *srapi.StarRocksWarehouse) error {
+ template := warehouse.Spec.Template
+ if warehouse.Spec.StarRocksCluster == "" || template == nil {
+ return SpecMissingError
+ }
+
+ klog.Infof("CnController get StarRocksCluster %s/%s to sync warehouse %s/%s",
+ warehouse.Namespace, warehouse.Spec.StarRocksCluster, warehouse.Namespace, warehouse.Name)
+ _, err := cc.getStarRocksCluster(warehouse.Namespace, warehouse.Spec.StarRocksCluster)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ return StarRocksClusterMissingError
+ }
+ return err
+ }
+
+ feconfig, err := cc.getFeConfig(ctx, warehouse.Namespace, warehouse.Spec.StarRocksCluster)
+ if err != nil {
+ return err
+ }
+ if val := feconfig["run_mode"]; val == nil || !strings.Contains(val.(string), "shared_data") {
+ return StarRocksClusterRunModeError
+ }
+
+ if !fe.CheckFEReady(ctx, cc.k8sClient, warehouse.Namespace, warehouse.Spec.StarRocksCluster) {
+ return FeNotReadyError
+ }
+ return cc.SyncCnSpec(ctx, object.NewFromWarehouse(warehouse), template.ToCnSpec())
+}
+
+func (cc *CnController) SyncCluster(ctx context.Context, src *srapi.StarRocksCluster) error {
if src.Spec.StarRocksCnSpec == nil {
if err := cc.ClearResources(ctx, src); err != nil {
klog.Errorf("cnController sync namespace=%s, name=%s, err=%s", src.Namespace, src.Name, err.Error())
@@ -63,44 +106,50 @@ func (cc *CnController) Sync(ctx context.Context, src *srapi.StarRocksCluster) e
return nil
}
- if err := cc.mutating(src); err != nil {
- klog.Errorf("cnController sync failed when mutating, namespace=%s, name=%s, err=%s",
- src.Namespace, src.Name, err.Error())
- return err
+ if !fe.CheckFEReady(ctx, cc.k8sClient, src.Namespace, src.Name) {
+ return nil
}
- if err := cc.validating(src); err != nil {
- klog.Errorf("cnController sync failed when validating, namespace=%s, name=%s, err=%s",
- src.Namespace, src.Name, err.Error())
+ return cc.SyncCnSpec(ctx, object.NewFromCluster(src), src.Spec.StarRocksCnSpec)
+}
+
+func (cc *CnController) SyncCnSpec(ctx context.Context, object object.StarRocksObject, cnSpec *srapi.StarRocksCnSpec) error {
+ if err := cc.mutating(cnSpec); err != nil {
return err
}
- if !fe.CheckFEOk(ctx, cc.k8sClient, src) {
- return nil
+ if err := cc.validating(cnSpec); err != nil {
+ return err
}
- cnSpec := src.Spec.StarRocksCnSpec
-
- // get the cn configMap for resolve ports.
- // 2. get config for generate statefulset and service.
- config, err := cc.GetConfig(ctx, &cnSpec.ConfigMapInfo, src.Namespace)
+ klog.Infof("CnController get the query port from fe ConfigMap to resolve port, namespace=%s, name=%s",
+ object.Namespace, object.Name)
+ config, err := cc.GetConfig(ctx, &cnSpec.ConfigMapInfo, object.Namespace)
+ if err != nil {
+ return err
+ }
+ feconfig, err := cc.getFeConfig(ctx, object.Namespace, object.ClusterName)
if err != nil {
- klog.Error("CnController Sync ", "resolve cn configmap failed, namespace ", src.Namespace, " configmapName ",
- cnSpec.ConfigMapInfo.ConfigMapName, " configMapKey ", cnSpec.ConfigMapInfo.ResolveKey, " error ", err)
return err
}
-
- feconfig, _ := cc.getFeConfig(ctx, &src.Spec.StarRocksFeSpec.ConfigMapInfo, src.Namespace)
- // annotation: add query port in cn config.
config[rutils.QUERY_PORT] = strconv.FormatInt(int64(rutils.GetPort(feconfig, rutils.QUERY_PORT)), 10)
+ config[rutils.HTTP_PORT] = strconv.FormatInt(int64(rutils.GetPort(feconfig, rutils.HTTP_PORT)), 10)
- // generate new cn internal service.
- externalsvc := rutils.BuildExternalService(src, service.ExternalServiceName(src.Name, cnSpec), rutils.CnService, config,
- load.Selector(src.Name, cnSpec), load.Labels(src.Name, cnSpec))
- // create or update fe service, update the status of cn on src.
- // publish the service.
- // patch the internal service for fe and cn connection.
- searchServiceName := service.SearchServiceName(src.Name, cnSpec)
+ klog.Infof("CnController build and apply statefulset for cn, namespace=%s, name=%s", object.Namespace, object.AliasName)
+ podTemplateSpec, err := cc.buildPodTemplate(object, cnSpec, config)
+ if err != nil {
+ return err
+ }
+ sts := statefulset.MakeStatefulset(object, cnSpec, *podTemplateSpec)
+ if err = cc.applyStatefulset(ctx, &sts); err != nil {
+ return err
+ }
+
+ klog.Infof("CnController build external and internal service for cn, namespace=%s, name=%s",
+ object.Namespace, object.AliasName)
+ externalsvc := rutils.BuildExternalService(object, cnSpec, config,
+ load.Selector(object.AliasName, cnSpec), load.Labels(object.AliasName, cnSpec))
+ searchServiceName := service.SearchServiceName(object.AliasName, cnSpec)
internalService := service.MakeSearchService(searchServiceName, &externalsvc, []corev1.ServicePort{
{
Name: "heartbeat",
@@ -109,40 +158,24 @@ func (cc *CnController) Sync(ctx context.Context, src *srapi.StarRocksCluster) e
},
})
- // create cn statefulset.
- podTemplateSpec := cc.buildPodTemplate(src, config)
- st := statefulset.MakeStatefulset(src, cnSpec, podTemplateSpec)
- if err = cc.applyStatefulset(ctx, &st); err != nil {
- klog.Errorf("CnController Sync applyStatefulset name=%s, namespace=%s, failed. err=%s\n", st.Name, st.Namespace, err.Error())
+ klog.Infof("CnController apply external and internal service for cn, namespace=%s, name=%s",
+ object.Namespace, object.AliasName)
+ if err := k8sutils.ApplyService(ctx, cc.k8sClient, &externalsvc, rutils.ServiceDeepEqual); err != nil {
return err
}
-
- if err = k8sutils.ApplyService(ctx, cc.k8sClient, internalService, func(new *corev1.Service, esvc *corev1.Service) bool {
+ if err := k8sutils.ApplyService(ctx, cc.k8sClient, internalService, func(new *corev1.Service, esvc *corev1.Service) bool {
// for compatible v1.5, we use `cn-domain-search` for internal communicating.
- internalService.Name = st.Spec.ServiceName
+ internalService.Name = sts.Spec.ServiceName
return rutils.ServiceDeepEqual(new, esvc)
}); err != nil {
- klog.Infof(""+
- " Sync patch internal service namespace=%s, name=%s, error=%s", internalService.Namespace, internalService.Name)
- return err
- }
- // 3.2 patch the external service for users access cn service.
- if err = k8sutils.ApplyService(ctx, cc.k8sClient, &externalsvc, rutils.ServiceDeepEqual); err != nil {
- klog.Infof("CnController Sync patch external service namespace=%s, name=%s, error=%s", externalsvc.Namespace, externalsvc.Name)
return err
}
- // 4. create autoscaler.
+ klog.Infof("CnController build and apply HPA for cn, namespace=%s, name=%s", object.Namespace, object.AliasName)
if cnSpec.AutoScalingPolicy != nil {
- err = cc.deployAutoScaler(ctx, *cnSpec.AutoScalingPolicy, &st, src)
- } else {
- if src.Status.StarRocksCnStatus == nil || src.Status.StarRocksCnStatus.HorizontalScaler.Name == "" {
- return nil
- }
- err = cc.deleteAutoScaler(ctx, src)
+ return cc.deployAutoScaler(ctx, object, cnSpec, *cnSpec.AutoScalingPolicy, &sts)
}
-
- return err
+ return nil
}
func (cc *CnController) applyStatefulset(ctx context.Context, st *appv1.StatefulSet) error {
@@ -182,59 +215,157 @@ func (cc *CnController) applyStatefulset(ctx context.Context, st *appv1.Stateful
return nil
}
-func (cc *CnController) UpdateStatus(src *srapi.StarRocksCluster) error {
- // if spec is not exist, status is empty. but before clear status we must clear all resource about be used by ClearResources.
+// UpdateWarehouseStatus updates the status of StarRocksWarehouse.
+func (cc *CnController) UpdateWarehouseStatus(warehouse *srapi.StarRocksWarehouse) error {
+ template := warehouse.Spec.Template
+ if template == nil {
+ warehouse.Status.WarehouseComponentStatus = nil
+ return nil
+ }
+
+ status := warehouse.Status.WarehouseComponentStatus
+ status.Phase = srapi.ComponentReconciling
+ return cc.UpdateStatus(object.NewFromWarehouse(warehouse), template.ToCnSpec(), status)
+}
+
+// UpdateClusterStatus update the status of StarRocksCluster.
+func (cc *CnController) UpdateClusterStatus(src *srapi.StarRocksCluster) error {
cnSpec := src.Spec.StarRocksCnSpec
if cnSpec == nil {
src.Status.StarRocksCnStatus = nil
return nil
}
- cs := &srapi.StarRocksCnStatus{}
- if src.Status.StarRocksCnStatus != nil {
- cs = src.Status.StarRocksCnStatus.DeepCopy()
+
+ if src.Status.StarRocksCnStatus == nil {
+ src.Status.StarRocksCnStatus = &srapi.StarRocksCnStatus{
+ StarRocksComponentStatus: srapi.StarRocksComponentStatus{
+ Phase: srapi.ComponentReconciling,
+ },
+ }
}
+ cs := src.Status.StarRocksCnStatus
cs.Phase = srapi.ComponentReconciling
- src.Status.StarRocksCnStatus = cs
+ return cc.UpdateStatus(object.NewFromCluster(src), cnSpec, cs)
+}
+
+func (cc *CnController) UpdateStatus(object object.StarRocksObject,
+ cnSpec *srapi.StarRocksCnSpec, cnStatus *srapi.StarRocksCnStatus) error {
var st appv1.StatefulSet
- statefulSetName := load.Name(src.Name, cnSpec)
- err := cc.k8sClient.Get(context.Background(), types.NamespacedName{Namespace: src.Namespace, Name: statefulSetName}, &st)
- if apierrors.IsNotFound(err) {
- klog.Infof("CnController UpdateStatus the statefulset name=%s is not found.\n", statefulSetName)
+ statefulSetName := load.Name(object.AliasName, cnSpec)
+ namespacedName := types.NamespacedName{Namespace: object.Namespace, Name: statefulSetName}
+ if err := cc.k8sClient.Get(context.Background(), namespacedName, &st); apierrors.IsNotFound(err) {
+ klog.Infof("CnController UpdateStatus the statefulset name=%s is not found.\n", statefulSetName)
return nil
}
if cnSpec.AutoScalingPolicy != nil {
- cs.HorizontalScaler.Name = cc.generateAutoScalerName(src)
- cs.HorizontalScaler.Version = cnSpec.AutoScalingPolicy.Version.Complete(k8sutils.KUBE_MAJOR_VERSION,
+ cnStatus.HorizontalScaler.Name = cc.generateAutoScalerName(object.AliasName, cnSpec)
+ cnStatus.HorizontalScaler.Version = cnSpec.AutoScalingPolicy.Version.Complete(k8sutils.KUBE_MAJOR_VERSION,
k8sutils.KUBE_MINOR_VERSION)
} else {
- cs.HorizontalScaler = srapi.HorizontalScaler{}
+ cnStatus.HorizontalScaler = srapi.HorizontalScaler{}
}
- cs.ServiceName = service.ExternalServiceName(src.Name, cnSpec)
- cs.ResourceNames = rutils.MergeSlices(cs.ResourceNames, []string{statefulSetName})
+ cnStatus.ServiceName = service.ExternalServiceName(object.AliasName, cnSpec)
+ cnStatus.ResourceNames = rutils.MergeSlices(cnStatus.ResourceNames, []string{statefulSetName})
- if err := subc.UpdateStatus(&cs.StarRocksComponentStatus, cc.k8sClient,
- src.Namespace, load.Name(src.Name, cnSpec), pod.Labels(src.Name, cnSpec), subc.StatefulSetLoadType); err != nil {
+ if err := subc.UpdateStatus(&cnStatus.StarRocksComponentStatus, cc.k8sClient,
+ object.Namespace, load.Name(object.AliasName, cnSpec), pod.Labels(object.AliasName, cnSpec), subc.StatefulSetLoadType); err != nil {
return err
}
return nil
}
+func (cc *CnController) ClearWarehouse(ctx context.Context, namespace string, name string) error {
+ executor, err := NewSQLExecutor(cc.k8sClient, namespace, object.GetAliasName(name))
+ if err != nil {
+ klog.Infof("CnController ClearWarehouse NewSQLExecutor error=%s", err.Error())
+ return err
+ }
+
+ err = executor.Execute(ctx, fmt.Sprintf("DROP WAREHOUSE %s", strings.ReplaceAll(name, "-", "_")))
+ if err != nil {
+ klog.Infof("CnController failed DROP WAREHOUSE <%v>, error=%s", strings.ReplaceAll(name, "-", "_"), err.Error())
+ // the error is deliberately not returned: we still want to remove the statefulset finalizer below.
+ }
+
+ // Remove the finalizer from cn statefulset
+ var sts appv1.StatefulSet
+ if err = cc.k8sClient.Get(context.Background(),
+ types.NamespacedName{
+ Namespace: namespace,
+ Name: load.Name(object.GetAliasName(name), (*srapi.StarRocksCnSpec)(nil)),
+ },
+ &sts); err != nil {
+ return err
+ }
+ sts.Finalizers = nil
+ if err = k8sutils.UpdateClientObject(context.Background(), cc.k8sClient, &sts); err != nil {
+ return err
+ }
+
+ // err is nil at this point; every failing step above has already returned
+ return err
+}
+
// Deploy autoscaler
-func (cc *CnController) deployAutoScaler(ctx context.Context, policy srapi.AutoScalingPolicy, target *appv1.StatefulSet,
- src *srapi.StarRocksCluster) error {
- params := cc.buildCnAutoscalerParams(policy, target, src)
- autoScaler := rutils.BuildHorizontalPodAutoscaler(params)
- if err := k8sutils.CreateOrUpdate(ctx, cc.k8sClient, autoScaler); err != nil {
- klog.Errorf("cnController deployAutoscaler failed, namespace=%s,name=%s,version=%s,error=%s",
- autoScaler.GetNamespace(), autoScaler.GetName(), policy.Version, err.Error())
+func (cc *CnController) deployAutoScaler(ctx context.Context, object object.StarRocksObject, cnSpec *srapi.StarRocksCnSpec,
+ policy srapi.AutoScalingPolicy, target *appv1.StatefulSet) error {
+ labels := rutils.Labels{}
+ labels.AddLabel(target.Labels)
+ labels.Add(srapi.ComponentLabelKey, "autoscaler")
+ autoscalerParams := &rutils.PodAutoscalerParams{
+ Namespace: target.Namespace,
+ Name: cc.generateAutoScalerName(object.AliasName, cnSpec),
+ Labels: labels,
+ AutoscalerType: cnSpec.AutoScalingPolicy.Version, // cnSpec.AutoScalingPolicy cannot be nil here: the caller checks it before invoking deployAutoScaler
+ TargetName: target.Name,
+ OwnerReferences: target.OwnerReferences,
+ ScalerPolicy: &policy,
+ }
+
+ autoScaler := rutils.BuildHorizontalPodAutoscaler(autoscalerParams)
+ autoScaler.SetAnnotations(make(map[string]string))
+ var clientObject client.Object
+ t := autoscalerParams.AutoscalerType.Complete(k8sutils.KUBE_MAJOR_VERSION, k8sutils.KUBE_MINOR_VERSION)
+ switch t {
+ case srapi.AutoScalerV1:
+ clientObject = &v1.HorizontalPodAutoscaler{}
+ case srapi.AutoScalerV2:
+ clientObject = &v2.HorizontalPodAutoscaler{}
+ case srapi.AutoScalerV2Beta2:
+ clientObject = &v2beta2.HorizontalPodAutoscaler{}
+ }
+ if err := cc.k8sClient.Get(ctx,
+ types.NamespacedName{
+ Namespace: autoscalerParams.Namespace,
+ Name: autoscalerParams.Name,
+ },
+ clientObject,
+ ); err != nil {
+ if apierrors.IsNotFound(err) {
+ return cc.k8sClient.Create(ctx, autoScaler)
+ }
return err
}
- return nil
+ var expectHash, actualHash string
+ expectHash = hash.HashObject(autoScaler)
+ if v, ok := clientObject.GetAnnotations()[srapi.ComponentResourceHash]; ok {
+ actualHash = v
+ } else {
+ actualHash = hash.HashObject(clientObject)
+ }
+
+ if expectHash == actualHash {
+ klog.Infof("cnController deployAutoscaler not need update, namespace=%s,name=%s,version=%s",
+ autoScaler.GetNamespace(), autoScaler.GetName(), t)
+ return nil
+ }
+ autoScaler.GetAnnotations()[srapi.ComponentResourceHash] = expectHash
+ return cc.k8sClient.Update(ctx, autoScaler)
}
// deleteAutoScaler delete the autoscaler.
@@ -299,9 +430,10 @@ func (cc *CnController) ClearResources(ctx context.Context, src *srapi.StarRocks
func (cc *CnController) GetConfig(ctx context.Context,
configMapInfo *srapi.ConfigMapInfo, namespace string) (map[string]interface{}, error) {
+ klog.Infof("CnController get configMap from %s/%s", namespace, configMapInfo.ConfigMapName)
configMap, err := k8sutils.GetConfigMap(ctx, cc.k8sClient, namespace, configMapInfo.ConfigMapName)
if err != nil && apierrors.IsNotFound(err) {
- klog.Info("CnController GetCnConfig cn config is not exist namespace ", namespace, " configmapName ", configMapInfo.ConfigMapName)
+ klog.Infof("ConfigMap for conf is missing, namespace=%s", namespace)
return make(map[string]interface{}), nil
} else if err != nil {
return make(map[string]interface{}), err
@@ -312,11 +444,17 @@ func (cc *CnController) GetConfig(ctx context.Context,
}
func (cc *CnController) getFeConfig(ctx context.Context,
- feconfigMapInfo *srapi.ConfigMapInfo, namespace string) (map[string]interface{}, error) {
- feconfigMap, err := k8sutils.GetConfigMap(ctx, cc.k8sClient, namespace, feconfigMapInfo.ConfigMapName)
+ clusterNamespace string, clusterName string) (map[string]interface{}, error) {
+ src, err := cc.getStarRocksCluster(clusterNamespace, clusterName)
+ if err != nil {
+ return nil, err
+ }
+ feconfigMapInfo := &src.Spec.StarRocksFeSpec.ConfigMapInfo
+
+ feconfigMap, err := k8sutils.GetConfigMap(ctx, cc.k8sClient, clusterNamespace, feconfigMapInfo.ConfigMapName)
if err != nil && apierrors.IsNotFound(err) {
- klog.V(constant.LOG_LEVEL).Info("CnController getFeConfig fe config is not exist namespace ",
- namespace, " configmapName ", feconfigMapInfo.ConfigMapName)
+ klog.V(constant.LOG_LEVEL).Info("CnController getFeConfig fe config is not exist namespace ", clusterNamespace,
+ " configmapName ", feconfigMapInfo.ConfigMapName)
return make(map[string]interface{}), nil
} else if err != nil {
return make(map[string]interface{}), err
@@ -325,26 +463,21 @@ func (cc *CnController) getFeConfig(ctx context.Context,
return res, err
}
-func (cc *CnController) mutating(src *srapi.StarRocksCluster) error {
- spec := src.Spec.StarRocksCnSpec
-
+func (cc *CnController) mutating(cnSpec *srapi.StarRocksCnSpec) error {
// Mutating because of the autoscaling policy.
// When the HPA policy with a fixed replica count is set: every time the starrockscluster CR is
// applied, the replica count of the StatefulSet object in K8S will be reset to the value
// specified by the 'Replicas' field, erasing the value previously set by HPA.
- policy := spec.AutoScalingPolicy
+ policy := cnSpec.AutoScalingPolicy
if policy != nil {
- spec.Replicas = nil
+ cnSpec.Replicas = nil
}
-
return nil
}
-func (cc *CnController) validating(src *srapi.StarRocksCluster) error {
- spec := src.Spec.StarRocksCnSpec
-
+func (cc *CnController) validating(cnSpec *srapi.StarRocksCnSpec) error {
// validating the auto scaling policy
- policy := spec.AutoScalingPolicy
+ policy := cnSpec.AutoScalingPolicy
if policy != nil {
minReplicas := int32(1) // default value
if policy.MinReplicas != nil {
@@ -359,6 +492,19 @@ func (cc *CnController) validating(src *srapi.StarRocksCluster) error {
return fmt.Errorf("the MaxReplicas must not be smaller than MinReplicas")
}
}
-
return nil
}
+
+// getStarRocksCluster get the StarRocksCluster object by namespace and name.
+func (cc *CnController) getStarRocksCluster(namespace, name string) (*srapi.StarRocksCluster, error) {
+ src := &srapi.StarRocksCluster{}
+ err := cc.k8sClient.Get(context.Background(), types.NamespacedName{Namespace: namespace, Name: name}, src)
+ if err != nil {
+ return nil, err
+ }
+ return src, nil
+}
+
+func (cc *CnController) generateAutoScalerName(srcName string, cnSpec srapi.SpecInterface) string {
+ return load.Name(srcName, cnSpec) + "-autoscaler"
+}
diff --git a/pkg/sub_controller/cn/cn_controller_test.go b/pkg/sub_controller/cn/cn_controller_test.go
index aa7c4f60..5c3e4e21 100644
--- a/pkg/sub_controller/cn/cn_controller_test.go
+++ b/pkg/sub_controller/cn/cn_controller_test.go
@@ -22,7 +22,9 @@ import (
rutils "github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/resource_utils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/load"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/service"
+ "github.com/davecgh/go-spew/spew"
"github.com/stretchr/testify/require"
appv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -32,6 +34,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
@@ -40,8 +43,7 @@ var (
)
func init() {
- groupVersion := schema.GroupVersion{Group: "starrocks.com", Version: "v1alpha1"}
-
+ groupVersion := schema.GroupVersion{Group: "starrocks.com", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
schemeBuilder := &scheme.Builder{GroupVersion: groupVersion}
_ = clientgoscheme.AddToScheme(sch)
@@ -143,9 +145,9 @@ func Test_Sync(t *testing.T) {
}
cc := New(k8sutils.NewFakeClient(sch, src, &ep))
- err := cc.Sync(context.Background(), src)
+ err := cc.SyncCluster(context.Background(), src)
require.Equal(t, nil, err)
- err = cc.UpdateStatus(src)
+ err = cc.UpdateClusterStatus(src)
require.Equal(t, nil, err)
ccStatus := src.Status.StarRocksCnStatus
require.Equal(t, srapi.ComponentReconciling, ccStatus.Phase)
@@ -164,3 +166,113 @@ func Test_Sync(t *testing.T) {
types.NamespacedName{Name: load.Name(src.Name, cnSpec), Namespace: "default"}, &st))
require.Equal(t, asvc.Spec.Selector, st.Spec.Selector.MatchLabels)
}
+
+func TestCnController_UpdateStatus(t *testing.T) {
+ type fields struct {
+ k8sClient client.Client
+ }
+ type args struct {
+ object object.StarRocksObject
+ cnSpec *srapi.StarRocksCnSpec
+ cnStatus *srapi.StarRocksCnStatus
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ wantErr bool
+ }{
+ {
+ name: "update the status of cluster",
+ fields: fields{
+ k8sClient: k8sutils.NewFakeClient(sch,
+ &appv1.StatefulSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-cn",
+ Namespace: "default",
+ },
+ Spec: appv1.StatefulSetSpec{
+ UpdateStrategy: appv1.StatefulSetUpdateStrategy{
+ Type: appv1.RollingUpdateStatefulSetStrategyType,
+ },
+ },
+ Status: appv1.StatefulSetStatus{
+ ObservedGeneration: 1,
+ },
+ },
+ &srapi.StarRocksCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ Spec: srapi.StarRocksClusterSpec{},
+ Status: srapi.StarRocksClusterStatus{},
+ },
+ ),
+ },
+ args: args{
+ object: object.StarRocksObject{
+ ObjectMeta: &metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ ClusterName: "test",
+ Kind: object.StarRocksClusterKind,
+ AliasName: "test",
+ },
+ cnSpec: &srapi.StarRocksCnSpec{},
+ cnStatus: &srapi.StarRocksCnStatus{},
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cc := &CnController{
+ k8sClient: tt.fields.k8sClient,
+ }
+ if err := cc.UpdateStatus(tt.args.object, tt.args.cnSpec, tt.args.cnStatus); (err != nil) != tt.wantErr {
+ t.Errorf("UpdateStatus() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ spew.Dump(tt.args.cnStatus)
+ })
+ }
+}
+
+func TestCnController_generateAutoScalerName(t *testing.T) {
+ type fields struct {
+ k8sClient client.Client
+ }
+ type args struct {
+ srcName string
+ cnSpec srapi.SpecInterface
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ want string
+ }{
+ {
+ name: "test1",
+ fields: fields{
+ k8sClient: nil,
+ },
+ args: args{
+ srcName: "test",
+ cnSpec: (*srapi.StarRocksCnSpec)(nil),
+ },
+ want: "test-cn-autoscaler",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cc := &CnController{
+ k8sClient: tt.fields.k8sClient,
+ }
+ if got := cc.generateAutoScalerName(tt.args.srcName, tt.args.cnSpec); got != tt.want {
+ t.Errorf("generateAutoScalerName() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/sub_controller/cn/cn_mysql.go b/pkg/sub_controller/cn/cn_mysql.go
new file mode 100644
index 00000000..754261cb
--- /dev/null
+++ b/pkg/sub_controller/cn/cn_mysql.go
@@ -0,0 +1,87 @@
+package cn
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ srapi "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/load"
+ _ "github.com/go-sql-driver/mysql" // import mysql driver
+ appv1 "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// SQLExecutor is used to execute sql statements.
+// Component CN needs to connect to mysql and execute sql statements. E.g.: When StarRocksWarehouse is deleted, the
+// related 'DROP WAREHOUSE <name>' statement needs to be executed.
+type SQLExecutor struct {
+ RootPassword string
+ FeServiceName string
+ FeServicePort string
+}
+
+// NewSQLExecutor creates a SQLExecutor instance. It will get the root password, fe service name, and fe service port
+// from the environment variables of the component CN.
+func NewSQLExecutor(k8sClient client.Client, namespace, aliasName string) (*SQLExecutor, error) {
+ rootPassword := ""
+ feServiceName := ""
+ feServicePort := ""
+
+ var est appv1.StatefulSet
+ if err := k8sClient.Get(context.Background(),
+ types.NamespacedName{Namespace: namespace, Name: load.Name(aliasName, (*srapi.StarRocksCnSpec)(nil))},
+ &est); err != nil {
+ return nil, err
+ }
+
+ var err error
+ for _, envVar := range est.Spec.Template.Spec.Containers[0].Env {
+ if envVar.Name == "MYSQL_PWD" {
+ rootPassword, err = k8sutils.GetEnvVarValue(k8sClient, namespace, envVar)
+ if err != nil {
+ klog.Infof("failed to get MYSQL_PWD from env vars, err: %v", err)
+ klog.Infof("use the default password: empty string")
+ }
+ } else if envVar.Name == "FE_SERVICE_NAME" {
+ feServiceName, err = k8sutils.GetEnvVarValue(k8sClient, namespace, envVar)
+ if err != nil {
+ klog.Errorf("failed to get FE_SERVICE_NAME from env vars, err: %v", err)
+ return nil, err
+ }
+ } else if envVar.Name == "FE_QUERY_PORT" {
+ feServicePort, err = k8sutils.GetEnvVarValue(k8sClient, namespace, envVar)
+ if err != nil {
+ klog.Errorf("failed to get FE_QUERY_PORT from env vars, err: %v", err)
+ return nil, err
+ }
+ }
+ }
+
+ return &SQLExecutor{
+ RootPassword: rootPassword,
+ FeServiceName: feServiceName,
+ FeServicePort: feServicePort,
+ }, nil
+}
+
+// Execute sql statements. Every time a SQL statement needs to be executed, a new sql.DB instance will be created.
+// This is because SQL statements are executed infrequently.
+func (executor *SQLExecutor) Execute(ctx context.Context, statements string) error {
+ db, err := sql.Open("mysql", fmt.Sprintf("root:%s@tcp(%s:%s)/",
+ executor.RootPassword, executor.FeServiceName, executor.FeServicePort))
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+
+ _, err = db.ExecContext(ctx, statements)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/pkg/sub_controller/cn/cn_pod.go b/pkg/sub_controller/cn/cn_pod.go
index 2f79f481..d4b0b0c7 100644
--- a/pkg/sub_controller/cn/cn_pod.go
+++ b/pkg/sub_controller/cn/cn_pod.go
@@ -17,12 +17,21 @@ limitations under the License.
package cn
import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+
srapi "github.com/StarRocks/starrocks-kubernetes-operator/pkg/apis/starrocks/v1"
rutils "github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/resource_utils"
+ srobject "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/pod"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/service"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/klog/v2"
)
const (
@@ -33,10 +42,8 @@ const (
)
// buildPodTemplate construct the podTemplate for deploy cn.
-func (cc *CnController) buildPodTemplate(src *srapi.StarRocksCluster, config map[string]interface{}) corev1.PodTemplateSpec {
- metaName := src.Name + "-" + srapi.DEFAULT_CN
- cnSpec := src.Spec.StarRocksCnSpec
-
+func (cc *CnController) buildPodTemplate(object srobject.StarRocksObject,
+ cnSpec *srapi.StarRocksCnSpec, config map[string]interface{}) (*corev1.PodTemplateSpec, error) {
vols, volumeMounts, vexist := pod.MountStorageVolumes(cnSpec)
// add default volume about log
if _, ok := vexist[_logPath]; !ok {
@@ -57,16 +64,29 @@ func (cc *CnController) buildPodTemplate(src *srapi.StarRocksCluster, config map
vols, volumeMounts = pod.MountConfigMaps(vols, volumeMounts, cnSpec.ConfigMaps)
vols, volumeMounts = pod.MountSecrets(vols, volumeMounts, cnSpec.Secrets)
- feExternalServiceName := service.ExternalServiceName(src.Name, src.Spec.StarRocksFeSpec)
- Envs := pod.Envs(src.Spec.StarRocksCnSpec, config, feExternalServiceName, src.Namespace, cnSpec.CnEnvVars)
+ feExternalServiceName := service.ExternalServiceName(object.ClusterName, (*srapi.StarRocksFeSpec)(nil))
+ envs := pod.Envs(cnSpec, config, feExternalServiceName, object.Namespace, cnSpec.CnEnvVars)
webServerPort := rutils.GetPort(config, rutils.WEBSERVER_PORT)
+ if object.Kind == srobject.StarRocksWarehouseKind {
+ if cc.addWarehouseEnv(feExternalServiceName,
+ strconv.FormatInt(int64(rutils.GetPort(config, rutils.HTTP_PORT)), 10)) {
+ envs = append(envs, corev1.EnvVar{
+ Name: "KUBE_STARROCKS_MULTI_WAREHOUSE",
+ // cn_entrypoint.sh in the container uses this env var to create the warehouse. Because the '-' character
+ // is not allowed in warehouse names in SQL, we replace it with '_'.
+ Value: strings.ReplaceAll(object.Name, "-", "_"),
+ })
+ } else {
+ return nil, GetFeFeatureInfoError
+ }
+ }
cnContainer := corev1.Container{
Name: srapi.DEFAULT_CN,
Image: cnSpec.Image,
Command: []string{"/opt/starrocks/cn_entrypoint.sh"},
Args: []string{"$(FE_SERVICE_NAME)"},
Ports: pod.Ports(cnSpec, config),
- Env: Envs,
+ Env: envs,
Resources: cnSpec.ResourceRequirements,
ImagePullPolicy: corev1.PullIfNotPresent,
VolumeMounts: volumeMounts,
@@ -84,16 +104,78 @@ func (cc *CnController) buildPodTemplate(src *srapi.StarRocksCluster, config map
})
}
- podSpec := pod.Spec(cnSpec, src.Spec.ServiceAccount, cnContainer, vols)
+ podSpec := pod.Spec(cnSpec, cnContainer, vols)
annotations := pod.Annotations(cnSpec)
podSpec.SecurityContext = pod.PodSecurityContext(cnSpec)
- return corev1.PodTemplateSpec{
+ metaName := object.Name + "-" + srapi.DEFAULT_CN
+ return &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
+ // Name should not be defined here, but it is used to compute the value of srapi.ComponentResourceHash
Name: metaName,
Annotations: annotations,
- Namespace: src.Namespace,
- Labels: pod.Labels(src.Name, cnSpec),
+ Namespace: object.Namespace,
+ Labels: pod.Labels(object.AliasName, cnSpec),
},
Spec: podSpec,
+ }, nil
+}
+
+// addWarehouseEnv reports whether the multi-warehouse env var should be added to the CN pod.
+// It calls FE /api/v2/feature to make sure FE supports multi-warehouse.
+// the response is like:
+//
+// {
+// "features": [
+// {
+// "name": "Feature Name",
+// "description": "Feature Description",
+// "link": "https://github.com/starrocksdb/starrocks/issues/new"
+// }
+// ],
+// "version": "feature/add-api-feature-interface",
+// "status": "OK"
+// }
+func (cc *CnController) addWarehouseEnv(feExternalServiceName string, feHTTPPort string) bool {
+ klog.Infof("call FE to get features information")
+ resp, err := http.Get(fmt.Sprintf("http://%s:%s/api/v2/feature", feExternalServiceName, feHTTPPort))
+ if err != nil {
+ klog.Errorf("failed to get features information from FE, err: %v", err)
+ return false
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ klog.Infof("FE return status code: %d", resp.StatusCode)
+ return false
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ klog.Errorf("failed to read response body, err: %v", err)
+ return false
+ }
+
+ result := struct {
+ Features []struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Link string `json:"link"`
+ } `json:"features"`
+ Version string `json:"version"`
+ Status string `json:"status"`
+ }{}
+ err = json.Unmarshal(body, &result)
+ if err != nil {
+ klog.Errorf("failed to unmarshal response body, err: %v", err)
+ return false
+ }
+
+ for _, feature := range result.Features {
+ if feature.Name == "multi-warehouse" {
+ klog.Infof("FE support multi-warehouse")
+ return true
+ }
}
+ klog.Infof("FE does not support multi-warehouse")
+ return false
}
diff --git a/pkg/sub_controller/fe/fe_controller.go b/pkg/sub_controller/fe/fe_controller.go
index eda85a49..82569705 100644
--- a/pkg/sub_controller/fe/fe_controller.go
+++ b/pkg/sub_controller/fe/fe_controller.go
@@ -23,6 +23,7 @@ import (
rutils "github.com/StarRocks/starrocks-kubernetes-operator/pkg/common/resource_utils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/load"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/pod"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/service"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/statefulset"
@@ -51,8 +52,8 @@ func (fc *FeController) GetControllerName() string {
return "feController"
}
-// Sync starRocksCluster spec to fe statefulset and service.
-func (fc *FeController) Sync(ctx context.Context, src *srapi.StarRocksCluster) error {
+// SyncCluster starRocksCluster spec to fe statefulset and service.
+func (fc *FeController) SyncCluster(ctx context.Context, src *srapi.StarRocksCluster) error {
if src.Spec.StarRocksFeSpec == nil {
klog.Infof("FeController Sync: the fe component is not needed, namespace = %v, starrocks cluster name = %v", src.Namespace, src.Name)
return nil
@@ -69,8 +70,8 @@ func (fc *FeController) Sync(ctx context.Context, src *srapi.StarRocksCluster) e
}
// generate new fe service.
- svc := rutils.BuildExternalService(src, service.ExternalServiceName(src.Name, src.Spec.StarRocksFeSpec), rutils.FeService, config,
- load.Selector(src.Name, feSpec), load.Labels(src.Name, feSpec))
+ object := object.NewFromCluster(src)
+ svc := rutils.BuildExternalService(object, feSpec, config, load.Selector(src.Name, feSpec), load.Labels(src.Name, feSpec))
// create or update fe external and domain search service, update the status of fe on src.
searchServiceName := service.SearchServiceName(src.Name, feSpec)
internalService := service.MakeSearchService(searchServiceName, &svc, []corev1.ServicePort{
@@ -84,7 +85,7 @@ func (fc *FeController) Sync(ctx context.Context, src *srapi.StarRocksCluster) e
// first deploy statefulset for compatible v1.5, apply statefulset for update pod.
podTemplateSpec := fc.buildPodTemplate(src, config)
- st := statefulset.MakeStatefulset(src, feSpec, podTemplateSpec)
+ st := statefulset.MakeStatefulset(object, feSpec, podTemplateSpec)
if err = k8sutils.ApplyStatefulSet(ctx, fc.k8sClient, &st, func(new *appv1.StatefulSet, est *appv1.StatefulSet) bool {
// if have restart annotation, we should exclude the interference for comparison.
return rutils.StatefulSetDeepEqual(new, est, false)
@@ -112,8 +113,8 @@ func (fc *FeController) Sync(ctx context.Context, src *srapi.StarRocksCluster) e
return nil
}
-// UpdateStatus update the all resource status about fe.
-func (fc *FeController) UpdateStatus(src *srapi.StarRocksCluster) error {
+// UpdateClusterStatus update the all resource status about fe.
+func (fc *FeController) UpdateClusterStatus(src *srapi.StarRocksCluster) error {
// if spec is not exist, status is empty. but before clear status we must clear all resource about be used by ClearResources.
feSpec := src.Spec.StarRocksFeSpec
if feSpec == nil {
@@ -205,14 +206,18 @@ func (fc *FeController) ClearResources(ctx context.Context, src *srapi.StarRocks
return nil
}
-// CheckFEOk check the fe cluster is ok for add cn node.
-func CheckFEOk(ctx context.Context, k8sClient client.Client, src *srapi.StarRocksCluster) bool {
+// CheckFEReady check the fe cluster is ok for add cn node.
+func CheckFEReady(ctx context.Context, k8sClient client.Client, clusterNamespace, clusterName string) bool {
endpoints := corev1.Endpoints{}
- // 1. wait for fe ok.
- externalServiceName := service.ExternalServiceName(src.Name, src.Spec.StarRocksFeSpec)
+ serviceName := service.ExternalServiceName(clusterName, (*srapi.StarRocksFeSpec)(nil))
+ // 1. wait for FE ready.
if err := k8sClient.Get(ctx,
- types.NamespacedName{Namespace: src.Namespace, Name: externalServiceName}, &endpoints); err != nil {
- klog.Errorf("waiting fe available, fe service name %s, occur failed %s", externalServiceName, err.Error())
+ types.NamespacedName{
+ Namespace: clusterNamespace,
+ Name: serviceName,
+ },
+ &endpoints); err != nil {
+ klog.Errorf("waiting fe available, fe service name %s, occur failed %s", serviceName, err.Error())
return false
}
diff --git a/pkg/sub_controller/fe/fe_controller_test.go b/pkg/sub_controller/fe/fe_controller_test.go
index 3baae00f..b5972868 100644
--- a/pkg/sub_controller/fe/fe_controller_test.go
+++ b/pkg/sub_controller/fe/fe_controller_test.go
@@ -45,7 +45,7 @@ var (
)
func init() {
- groupVersion := schema.GroupVersion{Group: "starrocks.com", Version: "v1alpha1"}
+ groupVersion := schema.GroupVersion{Group: "starrocks.com", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
schemeBuilder := &scheme.Builder{GroupVersion: groupVersion}
@@ -159,9 +159,9 @@ func Test_SyncDeploy(t *testing.T) {
fc := New(k8sutils.NewFakeClient(sch, src))
- err := fc.Sync(context.Background(), src)
+ err := fc.SyncCluster(context.Background(), src)
require.Equal(t, nil, err)
- err = fc.UpdateStatus(src)
+ err = fc.UpdateClusterStatus(src)
require.Equal(t, nil, err)
festatus := src.Status.StarRocksFeStatus
require.Equal(t, nil, err)
diff --git a/pkg/sub_controller/fe/fe_pod.go b/pkg/sub_controller/fe/fe_pod.go
index 7ff93287..6f54623f 100644
--- a/pkg/sub_controller/fe/fe_pod.go
+++ b/pkg/sub_controller/fe/fe_pod.go
@@ -74,7 +74,7 @@ func (fc *FeController) buildPodTemplate(src *srapi.StarRocksCluster, config map
vols, volMounts = pod.MountSecrets(vols, volMounts, feSpec.Secrets)
feExternalServiceName := service.ExternalServiceName(src.Name, feSpec)
- Envs := pod.Envs(src.Spec.StarRocksFeSpec, config, feExternalServiceName, src.Namespace, feSpec.FeEnvVars)
+ envs := pod.Envs(src.Spec.StarRocksFeSpec, config, feExternalServiceName, src.Namespace, feSpec.FeEnvVars)
httpPort := rutils.GetPort(config, rutils.HTTP_PORT)
feContainer := corev1.Container{
Name: srapi.DEFAULT_FE,
@@ -82,7 +82,7 @@ func (fc *FeController) buildPodTemplate(src *srapi.StarRocksCluster, config map
Command: []string{"/opt/starrocks/fe_entrypoint.sh"},
Args: []string{"$(FE_SERVICE_NAME)"},
Ports: pod.Ports(feSpec, config),
- Env: Envs,
+ Env: envs,
Resources: feSpec.ResourceRequirements,
VolumeMounts: volMounts,
ImagePullPolicy: corev1.PullIfNotPresent,
@@ -100,7 +100,7 @@ func (fc *FeController) buildPodTemplate(src *srapi.StarRocksCluster, config map
})
}
- podSpec := pod.Spec(feSpec, src.Spec.ServiceAccount, feContainer, vols)
+ podSpec := pod.Spec(feSpec, feContainer, vols)
annotations := pod.Annotations(feSpec)
podSpec.SecurityContext = pod.PodSecurityContext(feSpec)
return corev1.PodTemplateSpec{
diff --git a/pkg/sub_controller/feproxy/feproxy_controller.go b/pkg/sub_controller/feproxy/feproxy_controller.go
index 9b0293e6..c5feaa38 100644
--- a/pkg/sub_controller/feproxy/feproxy_controller.go
+++ b/pkg/sub_controller/feproxy/feproxy_controller.go
@@ -25,6 +25,7 @@ import (
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/load"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/deployment"
+ "github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/object"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/pod"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/k8sutils/templates/service"
"github.com/StarRocks/starrocks-kubernetes-operator/pkg/sub_controller"
@@ -42,7 +43,7 @@ type FeProxyController struct {
k8sClient client.Client
}
-var _ sub_controller.SubController = &FeProxyController{}
+var _ sub_controller.ClusterSubController = &FeProxyController{}
// New construct a FeController.
func New(k8sClient client.Client) *FeProxyController {
@@ -55,8 +56,7 @@ func (controller *FeProxyController) GetControllerName() string {
return "feProxyController"
}
-// Sync starRocksCluster spec to fe statefulset and service.
-func (controller *FeProxyController) Sync(ctx context.Context, src *srapi.StarRocksCluster) error {
+func (controller *FeProxyController) SyncCluster(ctx context.Context, src *srapi.StarRocksCluster) error {
feProxySpec := src.Spec.StarRocksFeProxySpec
if feProxySpec == nil {
klog.Infof("FeProxyController Sync: the fe proxy component is not needed, namespace = %v, "+
@@ -69,7 +69,7 @@ func (controller *FeProxyController) Sync(ctx context.Context, src *srapi.StarRo
return nil
}
- if !fe.CheckFEOk(ctx, controller.k8sClient, src) {
+ if !fe.CheckFEReady(ctx, controller.k8sClient, src.Namespace, src.Name) {
return nil
}
@@ -92,8 +92,8 @@ func (controller *FeProxyController) Sync(ctx context.Context, src *srapi.StarRo
}
// sync fe proxy service
- externalServiceName := service.ExternalServiceName(src.Name, feProxySpec)
- externalsvc := rutils.BuildExternalService(src, externalServiceName, rutils.FeProxyService, nil,
+ object := object.NewFromCluster(src)
+ externalsvc := rutils.BuildExternalService(object, feProxySpec, nil,
load.Selector(src.Name, feProxySpec), load.Labels(src.Name, feProxySpec))
if err := k8sutils.ApplyService(ctx, controller.k8sClient, &externalsvc, rutils.ServiceDeepEqual); err != nil {
return err
@@ -102,8 +102,8 @@ func (controller *FeProxyController) Sync(ctx context.Context, src *srapi.StarRo
return nil
}
-// UpdateStatus update the all resource status about fe.
-func (controller *FeProxyController) UpdateStatus(src *srapi.StarRocksCluster) error {
+// UpdateClusterStatus update the all resource status about fe.
+func (controller *FeProxyController) UpdateClusterStatus(src *srapi.StarRocksCluster) error {
feProxySpec := src.Spec.StarRocksFeProxySpec
if feProxySpec == nil {
src.Status.StarRocksFeProxyStatus = nil
@@ -128,11 +128,11 @@ func (controller *FeProxyController) UpdateStatus(src *srapi.StarRocksCluster) e
}, &actual)
if err != nil {
if apierrors.IsNotFound(err) {
- klog.Infof("FeProxyController UpdateStatus: fe proxy deployment is not found, "+
+ klog.Infof("FeProxyController UpdateClusterStatus: fe proxy deployment is not found, "+
"namespace = %v, starrocks cluster name = %v", src.Namespace, src.Name)
return nil
}
- klog.Errorf("FeProxyController UpdateStatus: get fe proxy deployment failed, "+
+ klog.Errorf("FeProxyController UpdateClusterStatus: get fe proxy deployment failed, "+
"namespace = %v, starrocks cluster name = %v, err = %v", src.Namespace, src.Name, err)
return err
}
@@ -218,7 +218,7 @@ func (controller *FeProxyController) buildPodTemplate(src *srapi.StarRocksCluste
ReadOnlyRootFilesystem: func() *bool { b := false; return &b }(),
}
- podSpec := pod.Spec(feProxySpec, src.Spec.ServiceAccount, container, vols)
+ podSpec := pod.Spec(feProxySpec, container, vols)
return corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: feProxySpec.GetAnnotations(),
diff --git a/pkg/sub_controller/subcontroller.go b/pkg/sub_controller/subcontroller.go
index 32a46377..e5c3561d 100644
--- a/pkg/sub_controller/subcontroller.go
+++ b/pkg/sub_controller/subcontroller.go
@@ -31,9 +31,9 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-type SubController interface {
- // Sync reconcile for sub controller. bool represent the component have updated.
- Sync(ctx context.Context, src *srapi.StarRocksCluster) error
+type ClusterSubController interface {
+ // SyncCluster reconcile for sub controller. bool represent the component have updated.
+ SyncCluster(ctx context.Context, src *srapi.StarRocksCluster) error
// ClearResources clear all resource about sub-component.
ClearResources(ctx context.Context, src *srapi.StarRocksCluster) error
@@ -41,8 +41,19 @@ type SubController interface {
// GetControllerName return the controller name, beController, feController,cnController for log.
GetControllerName() string
- // UpdateStatus update the component status on src.
- UpdateStatus(src *srapi.StarRocksCluster) error
+ // UpdateClusterStatus update the component status on src.
+ UpdateClusterStatus(src *srapi.StarRocksCluster) error
+}
+
+type WarehouseSubController interface {
+ // ClearWarehouse will clear all resource about warehouse.
+ ClearWarehouse(ctx context.Context, namespace string, name string) error
+
+ SyncWarehouse(ctx context.Context, src *srapi.StarRocksWarehouse) error
+
+ GetControllerName() string
+
+ UpdateWarehouseStatus(warehouse *srapi.StarRocksWarehouse) error
}
type LoadType string
diff --git a/pkg/suite_test.go b/pkg/suite_test.go
index dc59600a..201a67b8 100644
--- a/pkg/suite_test.go
+++ b/pkg/suite_test.go
@@ -41,14 +41,6 @@ var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc
-/*func TestAPIs(t *testing.T) {
- RegisterFailHandler(Fail)
-
- RunSpecsWithDefaultAndCustomReporters(t,
- "Controller Suite",
- []Reporter{printer.NewlineReporter{}})
-}*/
-
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore
new file mode 100644
index 00000000..2de28da1
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.gitignore
@@ -0,0 +1,9 @@
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+Icon?
+ehthumbs.db
+Thumbs.db
+.idea
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
new file mode 100644
index 00000000..fb1478c3
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -0,0 +1,126 @@
+# This is the official list of Go-MySQL-Driver authors for copyright purposes.
+
+# If you are submitting a patch, please add your name or the name of the
+# organization which holds the copyright to this list in alphabetical order.
+
+# Names should be added to this file as
+# Name <email address>
+# The email address is not required for organizations.
+# Please keep the list sorted.
+
+
+# Individual Persons
+
+Aaron Hopkins
+Achille Roussel
+Alex Snast
+Alexey Palazhchenko
+Andrew Reid
+Animesh Ray
+Arne Hormann
+Ariel Mashraki
+Asta Xie
+Bulat Gaifullin
+Caine Jette
+Carlos Nieto
+Chris Kirkland
+Chris Moos
+Craig Wilson
+Daniel Montoya
+Daniel Nichter
+Daniël van Eeden
+Dave Protasowski
+DisposaBoy
+Egor Smolyakov
+Erwan Martin
+Evan Shaw
+Frederick Mayle
+Gustavo Kristic
+Hajime Nakagami
+Hanno Braun
+Henri Yandell
+Hirotaka Yamamoto
+Huyiguang
+ICHINOSE Shogo
+Ilia Cimpoes
+INADA Naoki
+Jacek Szwec
+James Harr
+Janek Vedock
+Jeff Hodges
+Jeffrey Charles
+Jerome Meyer
+Jiajia Zhong
+Jian Zhen
+Joshua Prunier
+Julien Lefevre
+Julien Schmidt
+Justin Li
+Justin Nuß
+Kamil Dziedzic
+Kei Kamikawa
+Kevin Malachowski
+Kieron Woodhouse
+Lance Tian
+Lennart Rudolph
+Leonardo YongUk Kim
+Linh Tran Tuan
+Lion Yang
+Luca Looz
+Lucas Liu
+Lunny Xiao
+Luke Scott
+Maciej Zimnoch
+Michael Woolnough
+Nathanial Murphy
+Nicola Peduzzi
+Olivier Mengué
+oscarzhao
+Paul Bonser
+Peter Schultz
+Phil Porada
+Rebecca Chin
+Reed Allman
+Richard Wilkes
+Robert Russell
+Runrioter Wung
+Samantha Frank
+Santhosh Kumar Tekuri
+Sho Iizuka
+Sho Ikeda
+Shuode Li
+Simon J Mudd
+Soroush Pour
+Stan Putrya
+Stanley Gunawan
+Steven Hartland
+Tan Jinhua <312841925 at qq.com>
+Thomas Wodarek
+Tim Ruffles
+Tom Jenkinson
+Vladimir Kovpak
+Vladyslav Zhelezniak
+Xiangyu Hu
+Xiaobing Jiang
+Xiuming Chen
+Xuehong Chan
+Zhenye Xie
+Zhixin Wen
+Ziheng Lyu
+
+# Organizations
+
+Barracuda Networks, Inc.
+Counting Ltd.
+DigitalOcean Inc.
+dyves labs AG
+Facebook Inc.
+GitHub Inc.
+Google Inc.
+InfoSum Ltd.
+Keybase Inc.
+Multiplay Ltd.
+Percona LLC
+Pivotal Inc.
+Stripe Inc.
+Zendesk Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
new file mode 100644
index 00000000..5166e4ad
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -0,0 +1,266 @@
+## Version 1.7.1 (2023-04-25)
+
+Changes:
+
+ - bump actions/checkout@v3 and actions/setup-go@v3 (#1375)
+ - Add go1.20 and mariadb10.11 to the testing matrix (#1403)
+ - Increase default maxAllowedPacket size. (#1411)
+
+Bugfixes:
+
+ - Use SET syntax as specified in the MySQL documentation (#1402)
+
+
+## Version 1.7 (2022-11-29)
+
+Changes:
+
+ - Drop support of Go 1.12 (#1211)
+ - Refactoring `(*textRows).readRow` in a more clear way (#1230)
+ - util: Reduce boundary check in escape functions. (#1316)
+ - enhancement for mysqlConn handleAuthResult (#1250)
+
+New Features:
+
+ - support Is comparison on MySQLError (#1210)
+ - return unsigned in database type name when necessary (#1238)
+ - Add API to express like a --ssl-mode=PREFERRED MySQL client (#1370)
+ - Add SQLState to MySQLError (#1321)
+
+Bugfixes:
+
+ - Fix parsing 0 year. (#1257)
+
+
+## Version 1.6 (2021-04-01)
+
+Changes:
+
+ - Migrate the CI service from travis-ci to GitHub Actions (#1176, #1183, #1190)
+ - `NullTime` is deprecated (#960, #1144)
+ - Reduce allocations when building SET command (#1111)
+ - Performance improvement for time formatting (#1118)
+ - Performance improvement for time parsing (#1098, #1113)
+
+New Features:
+
+ - Implement `driver.Validator` interface (#1106, #1174)
+ - Support returning `uint64` from `Valuer` in `ConvertValue` (#1143)
+ - Add `json.RawMessage` for converter and prepared statement (#1059)
+ - Interpolate `json.RawMessage` as `string` (#1058)
+ - Implements `CheckNamedValue` (#1090)
+
+Bugfixes:
+
+ - Stop rounding times (#1121, #1172)
+ - Put zero filler into the SSL handshake packet (#1066)
+ - Fix checking cancelled connections back into the connection pool (#1095)
+ - Fix remove last 0 byte for mysql_old_password when password is empty (#1133)
+
+
+## Version 1.5 (2020-01-07)
+
+Changes:
+
+ - Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017)
+ - Improve buffer handling (#890)
+ - Document potentially insecure TLS configs (#901)
+ - Use a double-buffering scheme to prevent data races (#943)
+ - Pass uint64 values without converting them to string (#838, #955)
+ - Update collations and make utf8mb4 default (#877, #1054)
+ - Make NullTime compatible with sql.NullTime in Go 1.13+ (#995)
+ - Removed CloudSQL support (#993, #1007)
+ - Add Go Module support (#1003)
+
+New Features:
+
+ - Implement support of optional TLS (#900)
+ - Check connection liveness (#934, #964, #997, #1048, #1051, #1052)
+ - Implement Connector Interface (#941, #958, #1020, #1035)
+
+Bugfixes:
+
+ - Mark connections as bad on error during ping (#875)
+ - Mark connections as bad on error during dial (#867)
+ - Fix connection leak caused by rapid context cancellation (#1024)
+ - Mark connections as bad on error during Conn.Prepare (#1030)
+
+
+## Version 1.4.1 (2018-11-14)
+
+Bugfixes:
+
+ - Fix TIME format for binary columns (#818)
+ - Fix handling of empty auth plugin names (#835)
+ - Fix caching_sha2_password with empty password (#826)
+ - Fix canceled context broke mysqlConn (#862)
+ - Fix OldAuthSwitchRequest support (#870)
+ - Fix Auth Response packet for cleartext password (#887)
+
+## Version 1.4 (2018-06-03)
+
+Changes:
+
+ - Documentation fixes (#530, #535, #567)
+ - Refactoring (#575, #579, #580, #581, #603, #615, #704)
+ - Cache column names (#444)
+ - Sort the DSN parameters in DSNs generated from a config (#637)
+ - Allow native password authentication by default (#644)
+ - Use the default port if it is missing in the DSN (#668)
+ - Removed the `strict` mode (#676)
+ - Do not query `max_allowed_packet` by default (#680)
+ - Dropped support Go 1.6 and lower (#696)
+ - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
+ - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
+ - Improved the compatibility of the authentication system (#807)
+
+New Features:
+
+ - Multi-Results support (#537)
+ - `rejectReadOnly` DSN option (#604)
+ - `context.Context` support (#608, #612, #627, #761)
+ - Transaction isolation level support (#619, #744)
+ - Read-Only transactions support (#618, #634)
+ - `NewConfig` function which initializes a config with default values (#679)
+ - Implemented the `ColumnType` interfaces (#667, #724)
+ - Support for custom string types in `ConvertValue` (#623)
+ - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
+ - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
+ - Implemented `driver.SessionResetter` (#779)
+ - `sha256_password` authentication plugin support (#808)
+
+Bugfixes:
+
+ - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
+ - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
+ - Removed columns definition cache since it sometimes cached invalid data (#592)
+ - Don't mutate registered TLS configs (#600)
+ - Make RegisterTLSConfig concurrency-safe (#613)
+ - Handle missing auth data in the handshake packet correctly (#646)
+ - Do not retry queries when data was written to avoid data corruption (#302, #736)
+ - Cache the connection pointer for error handling before invalidating it (#678)
+ - Fixed imports for appengine/cloudsql (#700)
+ - Fix sending STMT_LONG_DATA for 0 byte data (#734)
+ - Set correct capacity for []bytes read from length-encoded strings (#766)
+ - Make RegisterDial concurrency-safe (#773)
+
+
+## Version 1.3 (2016-12-01)
+
+Changes:
+
+ - Go 1.1 is no longer supported
+ - Use decimals fields in MySQL to format time types (#249)
+ - Buffer optimizations (#269)
+ - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
+ - Documented DSN system var quoting rules (#502)
+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
+
+New Features:
+
+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
+ - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+ - Native password authentication plugin support (#494, #524)
+
+Bugfixes:
+
+ - Fixed handling of queries without columns and rows (#255)
+ - Fixed a panic when SetKeepAlive() failed (#298)
+ - Handle ERR packets while reading rows (#321)
+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
+ - Actually zero out bytes in handshake response (#378)
+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
+ - Fixed tests with MySQL 5.7.9+ (#380)
+ - QueryUnescape TLS config names (#397)
+ - Fixed "broken pipe" error by writing to closed socket (#390)
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
+ - Invalidate connection on error while discarding additional results (#513)
+ - Allow terminating packets of length 0 (#516)
+
+
+## Version 1.2 (2014-06-03)
+
+Changes:
+
+ - We switched back to a "rolling release". `go get` installs the current master branch again
+ - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
+ - Exported errors to allow easy checking from application code
+ - Enabled TCP Keepalives on TCP connections
+ - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
+ - The DSN parser also checks for a missing separating slash
+ - Faster binary date / datetime to string formatting
+ - Also exported the MySQLWarning type
+ - mysqlConn.Close returns the first error encountered instead of ignoring all errors
+ - writePacket() automatically writes the packet size to the header
+ - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+
+New Features:
+
+ - `RegisterDial` allows the usage of a custom dial function to establish the network connection
+ - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
+ - Logging of critical errors is configurable with `SetLogger`
+ - Google CloudSQL support
+
+Bugfixes:
+
+ - Allow more than 32 parameters in prepared statements
+ - Various old_password fixes
+ - Fixed TestConcurrent test to pass Go's race detection
+ - Fixed appendLengthEncodedInteger for large numbers
+ - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
+
+
+## Version 1.1 (2013-11-02)
+
+Changes:
+
+ - Go-MySQL-Driver now requires Go 1.1
+ - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
+ - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
+ - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
+ - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
+ - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
+ - Optimized the buffer for reading
+ - stmt.Query now caches column metadata
+ - New Logo
+ - Changed the copyright header to include all contributors
+ - Improved the LOAD INFILE documentation
+ - The driver struct is now exported to make the driver directly accessible
+ - Refactored the driver tests
+ - Added more benchmarks and moved all to a separate file
+ - Other small refactoring
+
+New Features:
+
+ - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
+ - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
+ - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
+
+Bugfixes:
+
+ - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
+ - Convert to DB timezone when inserting `time.Time`
+ - Splitted packets (more than 16MB) are now merged correctly
+ - Fixed false positive `io.EOF` errors when the data was fully read
+ - Avoid panics on reuse of closed connections
+ - Fixed empty string producing false nil values
+ - Fixed sign byte for positive TIME fields
+
+
+## Version 1.0 (2013-05-14)
+
+Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
new file mode 100644
index 00000000..14e2f777
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
new file mode 100644
index 00000000..3b5d229a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -0,0 +1,531 @@
+# Go-MySQL-Driver
+
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
+
+![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
+
+---------------------------------------
+ * [Features](#features)
+ * [Requirements](#requirements)
+ * [Installation](#installation)
+ * [Usage](#usage)
+ * [DSN (Data Source Name)](#dsn-data-source-name)
+ * [Password](#password)
+ * [Protocol](#protocol)
+ * [Address](#address)
+ * [Parameters](#parameters)
+ * [Examples](#examples)
+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
+ * [context.Context Support](#contextcontext-support)
+ * [ColumnType Support](#columntype-support)
+ * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
+ * [time.Time support](#timetime-support)
+ * [Unicode support](#unicode-support)
+ * [Testing / Development](#testing--development)
+ * [License](#license)
+
+---------------------------------------
+
+## Features
+ * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
+ * Native Go implementation. No C-bindings, just pure Go
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Automatic handling of broken connections
+ * Automatic Connection Pooling *(by database/sql package)*
+ * Supports queries larger than 16MB
+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
+ * Intelligent `LONG DATA` handling in prepared statements
+ * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
+ * Optional `time.Time` parsing
+ * Optional placeholder interpolation
+
+## Requirements
+ * Go 1.13 or higher. We aim to support the 3 latest versions of Go.
+ * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
+```bash
+$ go get -u github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+
+```go
+import (
+ "database/sql"
+ "time"
+
+ _ "github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+if err != nil {
+ panic(err)
+}
+// See "Important settings" section.
+db.SetConnMaxLifetime(time.Minute * 3)
+db.SetMaxOpenConns(10)
+db.SetMaxIdleConns(10)
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
+
+### Important settings
+
+`db.SetConnMaxLifetime()` is required to ensure connections are closed by the driver safely before connection is closed by MySQL server, OS, or other middlewares. Since some middlewares close idle connections by 5 minutes, we recommend timeout shorter than 5 minutes. This setting helps load balancing and changing system variables too.
+
+`db.SetMaxOpenConns()` is highly recommended to limit the number of connection used by the application. There is no recommended limit number because it depends on application and MySQL server.
+
+`db.SetMaxIdleConns()` is recommended to be set same to `db.SetMaxOpenConns()`. When it is smaller than `SetMaxOpenConns()`, connections can be opened and closed much more frequently than you expect. Idle connections can be closed by the `db.SetConnMaxLifetime()`. If you want to close idle connections more rapidly, you can use `db.SetConnMaxIdleTime()` since Go 1.15.
+
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the databasename, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
+
+#### Password
+Passwords can consist of any character. Escaping is **not** necessary.
+
+#### Protocol
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
+In general you should use an Unix domain socket if available and TCP otherwise for best performance.
+
+#### Address
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+
+For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
+
+#### Parameters
+*Parameters are case-sensitive!*
+
+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
+
+##### `allowAllFiles`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+
+##### `allowCleartextPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](https://dev.mysql.com/doc/en/cleartext-pluggable-authentication.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+
+##### `allowFallbackToPlaintext`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowFallbackToPlaintext=true` acts like a `--ssl-mode=PREFERRED` MySQL client as described in [Command Options for Connecting to the Server](https://dev.mysql.com/doc/refman/5.7/en/connection-options.html#option_general_ssl-mode)
+
+##### `allowNativePasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+`allowNativePasswords=false` disallows the usage of MySQL native password method.
+
+##### `allowOldPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
+
+##### `charset`
+
+```
+Type: string
+Valid Values:
+Default: none
+```
+
+Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+
+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
+Unless you need the fallback behavior, please use `collation` instead.
+
+##### `checkConnLiveness`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+
+On supported platforms connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query retried with another connection.
+`checkConnLiveness=false` disables this liveness check of connections.
+
+##### `collation`
+
+```
+Type: string
+Valid Values:
+Default: utf8mb4_general_ci
+```
+
+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
+
+A list of valid charsets for a server is retrievable with `SHOW COLLATION`.
+
+The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
+
+Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+
+
+##### `clientFoundRows`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+
+##### `columnsWithAlias`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`.
+
+*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are rejected as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+
+##### `loc`
+
+```
+Type: string
+Valid Values:
+Default: UTC
+```
+
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
+
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
+Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+
+##### `maxAllowedPacket`
+```
+Type: decimal number
+Default: 64*1024*1024
+```
+
+Max packet size allowed in bytes. The default value is 64 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
+
+##### `multiStatements`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
+
+##### `parseTime`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
+The date or datetime like `0000-00-00 00:00:00` is converted into zero value of `time.Time`.
+
+
+##### `readTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+##### `rejectReadOnly`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+is for a possible race condition during an automatic failover, where the mysql
+client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However the same error number is used for some
+other cases. You should ensure your application will never cause an ERROR 1290
+except for `read-only` mode when enabling this option.
+
+
+##### `serverPubKey`
+
+```
+Type: string
+Valid Values:
+Default: none
+```
+
+Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
+Public keys are used to transmit encrypted data, e.g. for authentication.
+If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
+
+
+##### `timeout`
+
+```
+Type: duration
+Default: OS default
+```
+
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### `tls`
+
+```
+Type: bool / string
+Valid Values: true, false, skip-verify, preferred,
+Default: false
+```
+
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+
+
+##### `writeTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### System Variables
+
+Any other parameters are interpreted as system variables:
+ * `=`: `SET =`
+ * `=`: `SET =`
+ * `=%27%27`: `SET =''`
+
+Rules:
+* The values for string variables must be quoted with `'`.
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+ (which implies values of string variables must be wrapped with `%27`).
+
+Examples:
+ * `autocommit=1`: `SET autocommit=1`
+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+ * [`transaction_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation): `SET transaction_isolation='REPEATABLE-READ'`
+
+
+#### Examples
+```
+user@unix(/path/to/socket)/dbname
+```
+
+```
+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
+```
+
+```
+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
+```
+
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
+```
+user:password@/dbname?sql_mode=TRADITIONAL
+```
+
+TCP via IPv6:
+```
+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
+```
+
+TCP on a remote host, e.g. Amazon RDS:
+```
+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
+```
+
+Google Cloud SQL on App Engine:
+```
+user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
+```
+
+TCP using default port (3306) on localhost:
+```
+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
+```
+
+Use the default protocol (tcp) and host (localhost:3306):
+```
+user:password@/dbname
+```
+
+No Database preselected:
+```
+user:password@/
+```
+
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
+## `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `BIGINT`.
+
+## `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
+
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
+
+
+### `time.Time` support
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
+
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+
+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
+
+
+### Unicode support
+Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default.
+
+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+
+Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
+
+See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
+
+## Testing / Development
+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
+
+Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
+
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/.github/CONTRIBUTING.md) for details.
+
+---------------------------------------
+
+## License
+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+
+Mozilla summarizes the license scope as follows:
+> MPL: The copyleft applies to any files containing MPLed code.
+
+
+That means:
+ * You can **use** the **unchanged** source code both in private and commercially.
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
+
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
+
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
+
+![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
new file mode 100644
index 00000000..1b7e19f3
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
@@ -0,0 +1,19 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
+//
+// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+//go:build go1.19
+// +build go1.19
+
+package mysql
+
+import "sync/atomic"
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+type atomicBool = atomic.Bool
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
new file mode 100644
index 00000000..2e9a7f0b
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
@@ -0,0 +1,47 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
+//
+// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+//go:build !go1.19
+// +build !go1.19
+
+package mysql
+
+import "sync/atomic"
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// atomicBool is an implementation of atomic.Bool for older version of Go.
+// it is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _ noCopy
+ value uint32
+}
+
+// Load returns whether the current boolean value is true
+func (ab *atomicBool) Load() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Store sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Store(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// Swap sets the value of the bool and returns the old value.
+func (ab *atomicBool) Swap(value bool) bool {
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) > 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
new file mode 100644
index 00000000..1ff203e5
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -0,0 +1,437 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "sync"
+)
+
+// server pub keys registry
+var (
+ serverPubKeyLock sync.RWMutex
+ serverPubKeyRegistry map[string]*rsa.PublicKey
+)
+
+// RegisterServerPubKey registers a server RSA public key which can be used to
+// send data in a secure manner to the server without receiving the public key
+// in a potentially insecure way from the server first.
+// Registered keys can afterwards be used adding serverPubKey= to the DSN.
+//
+// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
+// after registering it and may not be modified.
+//
+// data, err := ioutil.ReadFile("mykey.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// block, _ := pem.Decode(data)
+// if block == nil || block.Type != "PUBLIC KEY" {
+// log.Fatal("failed to decode PEM block containing public key")
+// }
+//
+// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
+// mysql.RegisterServerPubKey("mykey", rsaPubKey)
+// } else {
+// log.Fatal("not a RSA public key")
+// }
+func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry == nil {
+ serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
+ }
+
+ serverPubKeyRegistry[name] = pubKey
+ serverPubKeyLock.Unlock()
+}
+
+// DeregisterServerPubKey removes the public key registered with the given name.
+func DeregisterServerPubKey(name string) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry != nil {
+ delete(serverPubKeyRegistry, name)
+ }
+ serverPubKeyLock.Unlock()
+}
+
+func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
+ serverPubKeyLock.RLock()
+ if v, ok := serverPubKeyRegistry[name]; ok {
+ pubKey = v
+ }
+ serverPubKeyLock.RUnlock()
+ return
+}
+
+// Hash password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+type myRnd struct {
+ seed1, seed2 uint32
+}
+
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+ return &myRnd{
+ seed1: seed1 % myRndMaxVal,
+ seed2: seed2 % myRndMaxVal,
+ }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+func (r *myRnd) NextByte() byte {
+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+func pwHash(password []byte) (result [2]uint32) {
+ var add uint32 = 7
+ var tmp uint32
+
+ result[0] = 1345345333
+ result[1] = 0x12345671
+
+ for _, c := range password {
+ // skip spaces and tabs in password
+ if c == ' ' || c == '\t' {
+ continue
+ }
+
+ tmp = uint32(c)
+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+ result[1] += (result[1] << 8) ^ result[0]
+ add += tmp
+ }
+
+ // Remove sign bit (1<<31)-1)
+ result[0] &= 0x7FFFFFFF
+ result[1] &= 0x7FFFFFFF
+
+ return
+}
+
+// Hash password using insecure pre 4.1 method
+func scrambleOldPassword(scramble []byte, password string) []byte {
+ scramble = scramble[:8]
+
+ hashPw := pwHash([]byte(password))
+ hashSc := pwHash(scramble)
+
+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+ var out [8]byte
+ for i := range out {
+ out[i] = r.NextByte() + 64
+ }
+
+ mask := r.NextByte()
+ for i := range out {
+ out[i] ^= mask
+ }
+
+ return out[:]
+}
+
+// Hash password using 4.1+ method (SHA1)
+func scramblePassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write([]byte(password))
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
+
+// Hash password using MySQL 8+ method (SHA256)
+func scrambleSHA256Password(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
+
+ crypt := sha256.New()
+ crypt.Write([]byte(password))
+ message1 := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1)
+ message1Hash := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1Hash)
+ crypt.Write(scramble)
+ message2 := crypt.Sum(nil)
+
+ for i := range message1 {
+ message1[i] ^= message2[i]
+ }
+
+ return message1
+}
+
+func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
+ plain := make([]byte, len(password)+1)
+ copy(plain, password)
+ for i := range plain {
+ j := i % len(seed)
+ plain[i] ^= seed[j]
+ }
+ sha1 := sha1.New()
+ return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
+}
+
+func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
+ enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
+ if err != nil {
+ return err
+ }
+ return mc.writeAuthSwitchPacket(enc)
+}
+
+func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
+ switch plugin {
+ case "caching_sha2_password":
+ authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
+ return authResp, nil
+
+ case "mysql_old_password":
+ if !mc.cfg.AllowOldPasswords {
+ return nil, ErrOldPassword
+ }
+ if len(mc.cfg.Passwd) == 0 {
+ return nil, nil
+ }
+ // Note: there are edge cases where this should work but doesn't;
+ // this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+ authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
+ return authResp, nil
+
+ case "mysql_clear_password":
+ if !mc.cfg.AllowCleartextPasswords {
+ return nil, ErrCleartextPassword
+ }
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ return append([]byte(mc.cfg.Passwd), 0), nil
+
+ case "mysql_native_password":
+ if !mc.cfg.AllowNativePasswords {
+ return nil, ErrNativePassword
+ }
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only need and will need 20-byte challenge.
+ authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
+ return authResp, nil
+
+ case "sha256_password":
+ if len(mc.cfg.Passwd) == 0 {
+ return []byte{0}, nil
+ }
+ // unlike caching_sha2_password, sha256_password does not accept
+ // cleartext password on unix transport.
+ if mc.cfg.TLS != nil {
+ // write cleartext auth packet
+ return append([]byte(mc.cfg.Passwd), 0), nil
+ }
+
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ return []byte{1}, nil
+ }
+
+ // encrypted password
+ enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
+ return enc, err
+
+ default:
+ errLog.Print("unknown auth plugin:", plugin)
+ return nil, ErrUnknownPlugin
+ }
+}
+
+func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
+ // Read Result Packet
+ authData, newPlugin, err := mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // handle auth plugin switch, if requested
+ if newPlugin != "" {
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if authData == nil {
+ authData = oldAuthData
+ } else {
+ // copy data from read buffer to owned slice
+ copy(oldAuthData, authData)
+ }
+
+ plugin = newPlugin
+
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ return err
+ }
+ if err = mc.writeAuthSwitchPacket(authResp); err != nil {
+ return err
+ }
+
+ // Read Result Packet
+ authData, newPlugin, err = mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // Do not allow to change the auth plugin more than once
+ if newPlugin != "" {
+ return ErrMalformPkt
+ }
+ }
+
+ switch plugin {
+
+ // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ case "caching_sha2_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ case 1:
+ switch authData[0] {
+ case cachingSha2PasswordFastAuthSuccess:
+ if err = mc.readResultOK(); err == nil {
+ return nil // auth successful
+ }
+
+ case cachingSha2PasswordPerformFullAuthentication:
+ if mc.cfg.TLS != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
+ if err != nil {
+ return err
+ }
+ } else {
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ data, err := mc.buf.takeSmallBuffer(4 + 1)
+ if err != nil {
+ return err
+ }
+ data[4] = cachingSha2PasswordRequestPublicKey
+ err = mc.writePacket(data)
+ if err != nil {
+ return err
+ }
+
+ if data, err = mc.readPacket(); err != nil {
+ return err
+ }
+
+ if data[0] != iAuthMoreData {
+ return fmt.Errorf("unexpect resp from server for caching_sha2_password perform full authentication")
+ }
+
+ // parse public key
+ block, rest := pem.Decode(data[1:])
+ if block == nil {
+ return fmt.Errorf("No Pem data found, data: %s", rest)
+ }
+ pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+ pubKey = pkix.(*rsa.PublicKey)
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pubKey)
+ if err != nil {
+ return err
+ }
+ }
+ return mc.readResultOK()
+
+ default:
+ return ErrMalformPkt
+ }
+ default:
+ return ErrMalformPkt
+ }
+
+ case "sha256_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ default:
+ block, _ := pem.Decode(authData)
+ if block == nil {
+ return fmt.Errorf("no Pem data found, data: %s", authData)
+ }
+
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
+ if err != nil {
+ return err
+ }
+ return mc.readResultOK()
+ }
+
+ default:
+ return nil // auth successful
+ }
+
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
new file mode 100644
index 00000000..0774c5c8
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "io"
+ "net"
+ "time"
+)
+
+const defaultBufSize = 4096
+const maxCachedBufSize = 256 * 1024
+
+// A buffer which is used for both reading and writing.
+// This is possible since communication on each connection is synchronous.
+// In other words, we can't write and read simultaneously on the same connection.
+// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
+// Also highly optimized for this particular use case.
+// This buffer is backed by two byte slices in a double-buffering scheme
+type buffer struct {
+	buf     []byte // buf is a byte buffer whose length and capacity are equal.
+ nc net.Conn
+ idx int
+ length int
+ timeout time.Duration
+ dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
+	flipcnt uint      // flipcnt is the current buffer counter for double-buffering
+}
+
+// newBuffer allocates and returns a new buffer.
+func newBuffer(nc net.Conn) buffer {
+ fg := make([]byte, defaultBufSize)
+ return buffer{
+ buf: fg,
+ nc: nc,
+ dbuf: [2][]byte{fg, nil},
+ }
+}
+
+// flip replaces the active buffer with the background buffer
+// this is a delayed flip that simply increases the buffer counter;
+// the actual flip will be performed the next time we call `buffer.fill`
+func (b *buffer) flip() {
+ b.flipcnt += 1
+}
+
+// fill reads into the buffer until at least _need_ bytes are in it
+func (b *buffer) fill(need int) error {
+ n := b.length
+ // fill data into its double-buffering target: if we've called
+ // flip on this buffer, we'll be copying to the background buffer,
+ // and then filling it with network data; otherwise we'll just move
+ // the contents of the current buffer to the front before filling it
+ dest := b.dbuf[b.flipcnt&1]
+
+ // grow buffer if necessary to fit the whole packet.
+ if need > len(dest) {
+ // Round up to the next multiple of the default size
+ dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
+
+ // if the allocated buffer is not too large, move it to backing storage
+ // to prevent extra allocations on applications that perform large reads
+ if len(dest) <= maxCachedBufSize {
+ b.dbuf[b.flipcnt&1] = dest
+ }
+ }
+
+ // if we're filling the fg buffer, move the existing data to the start of it.
+ // if we're filling the bg buffer, copy over the data
+ if n > 0 {
+ copy(dest[:n], b.buf[b.idx:])
+ }
+
+ b.buf = dest
+ b.idx = 0
+
+ for {
+ if b.timeout > 0 {
+ if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
+ return err
+ }
+ }
+
+ nn, err := b.nc.Read(b.buf[n:])
+ n += nn
+
+ switch err {
+ case nil:
+ if n < need {
+ continue
+ }
+ b.length = n
+ return nil
+
+ case io.EOF:
+ if n >= need {
+ b.length = n
+ return nil
+ }
+ return io.ErrUnexpectedEOF
+
+ default:
+ return err
+ }
+ }
+}
+
+// returns next N bytes from buffer.
+// The returned slice is only guaranteed to be valid until the next read
+func (b *buffer) readNext(need int) ([]byte, error) {
+ if b.length < need {
+ // refill
+ if err := b.fill(need); err != nil {
+ return nil, err
+ }
+ }
+
+ offset := b.idx
+ b.idx += need
+ b.length -= need
+ return b.buf[offset:b.idx], nil
+}
+
+// takeBuffer returns a buffer with the requested size.
+// If possible, a slice from the existing buffer is returned.
+// Otherwise a bigger buffer is made.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeBuffer(length int) ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+
+ // test (cheap) general case first
+ if length <= cap(b.buf) {
+ return b.buf[:length], nil
+ }
+
+ if length < maxPacketSize {
+ b.buf = make([]byte, length)
+ return b.buf, nil
+ }
+
+ // buffer is larger than we want to store.
+ return make([]byte, length), nil
+}
+
+// takeSmallBuffer is shortcut which can be used if length is
+// known to be smaller than defaultBufSize.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+ return b.buf[:length], nil
+}
+
+// takeCompleteBuffer returns the complete existing buffer.
+// This can be used if the necessary buffer size is unknown.
+// cap and len of the returned buffer will be equal.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeCompleteBuffer() ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+ return b.buf, nil
+}
+
+// store stores buf, an updated buffer, if it is suitable to do so.
+func (b *buffer) store(buf []byte) error {
+ if b.length > 0 {
+ return ErrBusyBuffer
+ } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
+ b.buf = buf[:cap(buf)]
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
new file mode 100644
index 00000000..295bfbe5
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -0,0 +1,266 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const defaultCollation = "utf8mb4_general_ci"
+const binaryCollation = "binary"
+
+// A list of available collations mapped to the internal ID.
+// To update this map use the following MySQL query:
+//
+// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
+//
+// Handshake packet has only 1 byte for collation_id. So we can't use collations with ID > 255.
+//
+// ucs2, utf16, and utf32 can't be used for connection charset.
+// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
+// They are commented out to reduce this map.
+var collations = map[string]byte{
+ "big5_chinese_ci": 1,
+ "latin2_czech_cs": 2,
+ "dec8_swedish_ci": 3,
+ "cp850_general_ci": 4,
+ "latin1_german1_ci": 5,
+ "hp8_english_ci": 6,
+ "koi8r_general_ci": 7,
+ "latin1_swedish_ci": 8,
+ "latin2_general_ci": 9,
+ "swe7_swedish_ci": 10,
+ "ascii_general_ci": 11,
+ "ujis_japanese_ci": 12,
+ "sjis_japanese_ci": 13,
+ "cp1251_bulgarian_ci": 14,
+ "latin1_danish_ci": 15,
+ "hebrew_general_ci": 16,
+ "tis620_thai_ci": 18,
+ "euckr_korean_ci": 19,
+ "latin7_estonian_cs": 20,
+ "latin2_hungarian_ci": 21,
+ "koi8u_general_ci": 22,
+ "cp1251_ukrainian_ci": 23,
+ "gb2312_chinese_ci": 24,
+ "greek_general_ci": 25,
+ "cp1250_general_ci": 26,
+ "latin2_croatian_ci": 27,
+ "gbk_chinese_ci": 28,
+ "cp1257_lithuanian_ci": 29,
+ "latin5_turkish_ci": 30,
+ "latin1_german2_ci": 31,
+ "armscii8_general_ci": 32,
+ "utf8_general_ci": 33,
+ "cp1250_czech_cs": 34,
+ //"ucs2_general_ci": 35,
+ "cp866_general_ci": 36,
+ "keybcs2_general_ci": 37,
+ "macce_general_ci": 38,
+ "macroman_general_ci": 39,
+ "cp852_general_ci": 40,
+ "latin7_general_ci": 41,
+ "latin7_general_cs": 42,
+ "macce_bin": 43,
+ "cp1250_croatian_ci": 44,
+ "utf8mb4_general_ci": 45,
+ "utf8mb4_bin": 46,
+ "latin1_bin": 47,
+ "latin1_general_ci": 48,
+ "latin1_general_cs": 49,
+ "cp1251_bin": 50,
+ "cp1251_general_ci": 51,
+ "cp1251_general_cs": 52,
+ "macroman_bin": 53,
+ //"utf16_general_ci": 54,
+ //"utf16_bin": 55,
+ //"utf16le_general_ci": 56,
+ "cp1256_general_ci": 57,
+ "cp1257_bin": 58,
+ "cp1257_general_ci": 59,
+ //"utf32_general_ci": 60,
+ //"utf32_bin": 61,
+ //"utf16le_bin": 62,
+ "binary": 63,
+ "armscii8_bin": 64,
+ "ascii_bin": 65,
+ "cp1250_bin": 66,
+ "cp1256_bin": 67,
+ "cp866_bin": 68,
+ "dec8_bin": 69,
+ "greek_bin": 70,
+ "hebrew_bin": 71,
+ "hp8_bin": 72,
+ "keybcs2_bin": 73,
+ "koi8r_bin": 74,
+ "koi8u_bin": 75,
+ "utf8_tolower_ci": 76,
+ "latin2_bin": 77,
+ "latin5_bin": 78,
+ "latin7_bin": 79,
+ "cp850_bin": 80,
+ "cp852_bin": 81,
+ "swe7_bin": 82,
+ "utf8_bin": 83,
+ "big5_bin": 84,
+ "euckr_bin": 85,
+ "gb2312_bin": 86,
+ "gbk_bin": 87,
+ "sjis_bin": 88,
+ "tis620_bin": 89,
+ //"ucs2_bin": 90,
+ "ujis_bin": 91,
+ "geostd8_general_ci": 92,
+ "geostd8_bin": 93,
+ "latin1_spanish_ci": 94,
+ "cp932_japanese_ci": 95,
+ "cp932_bin": 96,
+ "eucjpms_japanese_ci": 97,
+ "eucjpms_bin": 98,
+ "cp1250_polish_ci": 99,
+ //"utf16_unicode_ci": 101,
+ //"utf16_icelandic_ci": 102,
+ //"utf16_latvian_ci": 103,
+ //"utf16_romanian_ci": 104,
+ //"utf16_slovenian_ci": 105,
+ //"utf16_polish_ci": 106,
+ //"utf16_estonian_ci": 107,
+ //"utf16_spanish_ci": 108,
+ //"utf16_swedish_ci": 109,
+ //"utf16_turkish_ci": 110,
+ //"utf16_czech_ci": 111,
+ //"utf16_danish_ci": 112,
+ //"utf16_lithuanian_ci": 113,
+ //"utf16_slovak_ci": 114,
+ //"utf16_spanish2_ci": 115,
+ //"utf16_roman_ci": 116,
+ //"utf16_persian_ci": 117,
+ //"utf16_esperanto_ci": 118,
+ //"utf16_hungarian_ci": 119,
+ //"utf16_sinhala_ci": 120,
+ //"utf16_german2_ci": 121,
+ //"utf16_croatian_ci": 122,
+ //"utf16_unicode_520_ci": 123,
+ //"utf16_vietnamese_ci": 124,
+ //"ucs2_unicode_ci": 128,
+ //"ucs2_icelandic_ci": 129,
+ //"ucs2_latvian_ci": 130,
+ //"ucs2_romanian_ci": 131,
+ //"ucs2_slovenian_ci": 132,
+ //"ucs2_polish_ci": 133,
+ //"ucs2_estonian_ci": 134,
+ //"ucs2_spanish_ci": 135,
+ //"ucs2_swedish_ci": 136,
+ //"ucs2_turkish_ci": 137,
+ //"ucs2_czech_ci": 138,
+ //"ucs2_danish_ci": 139,
+ //"ucs2_lithuanian_ci": 140,
+ //"ucs2_slovak_ci": 141,
+ //"ucs2_spanish2_ci": 142,
+ //"ucs2_roman_ci": 143,
+ //"ucs2_persian_ci": 144,
+ //"ucs2_esperanto_ci": 145,
+ //"ucs2_hungarian_ci": 146,
+ //"ucs2_sinhala_ci": 147,
+ //"ucs2_german2_ci": 148,
+ //"ucs2_croatian_ci": 149,
+ //"ucs2_unicode_520_ci": 150,
+ //"ucs2_vietnamese_ci": 151,
+ //"ucs2_general_mysql500_ci": 159,
+ //"utf32_unicode_ci": 160,
+ //"utf32_icelandic_ci": 161,
+ //"utf32_latvian_ci": 162,
+ //"utf32_romanian_ci": 163,
+ //"utf32_slovenian_ci": 164,
+ //"utf32_polish_ci": 165,
+ //"utf32_estonian_ci": 166,
+ //"utf32_spanish_ci": 167,
+ //"utf32_swedish_ci": 168,
+ //"utf32_turkish_ci": 169,
+ //"utf32_czech_ci": 170,
+ //"utf32_danish_ci": 171,
+ //"utf32_lithuanian_ci": 172,
+ //"utf32_slovak_ci": 173,
+ //"utf32_spanish2_ci": 174,
+ //"utf32_roman_ci": 175,
+ //"utf32_persian_ci": 176,
+ //"utf32_esperanto_ci": 177,
+ //"utf32_hungarian_ci": 178,
+ //"utf32_sinhala_ci": 179,
+ //"utf32_german2_ci": 180,
+ //"utf32_croatian_ci": 181,
+ //"utf32_unicode_520_ci": 182,
+ //"utf32_vietnamese_ci": 183,
+ "utf8_unicode_ci": 192,
+ "utf8_icelandic_ci": 193,
+ "utf8_latvian_ci": 194,
+ "utf8_romanian_ci": 195,
+ "utf8_slovenian_ci": 196,
+ "utf8_polish_ci": 197,
+ "utf8_estonian_ci": 198,
+ "utf8_spanish_ci": 199,
+ "utf8_swedish_ci": 200,
+ "utf8_turkish_ci": 201,
+ "utf8_czech_ci": 202,
+ "utf8_danish_ci": 203,
+ "utf8_lithuanian_ci": 204,
+ "utf8_slovak_ci": 205,
+ "utf8_spanish2_ci": 206,
+ "utf8_roman_ci": 207,
+ "utf8_persian_ci": 208,
+ "utf8_esperanto_ci": 209,
+ "utf8_hungarian_ci": 210,
+ "utf8_sinhala_ci": 211,
+ "utf8_german2_ci": 212,
+ "utf8_croatian_ci": 213,
+ "utf8_unicode_520_ci": 214,
+ "utf8_vietnamese_ci": 215,
+ "utf8_general_mysql500_ci": 223,
+ "utf8mb4_unicode_ci": 224,
+ "utf8mb4_icelandic_ci": 225,
+ "utf8mb4_latvian_ci": 226,
+ "utf8mb4_romanian_ci": 227,
+ "utf8mb4_slovenian_ci": 228,
+ "utf8mb4_polish_ci": 229,
+ "utf8mb4_estonian_ci": 230,
+ "utf8mb4_spanish_ci": 231,
+ "utf8mb4_swedish_ci": 232,
+ "utf8mb4_turkish_ci": 233,
+ "utf8mb4_czech_ci": 234,
+ "utf8mb4_danish_ci": 235,
+ "utf8mb4_lithuanian_ci": 236,
+ "utf8mb4_slovak_ci": 237,
+ "utf8mb4_spanish2_ci": 238,
+ "utf8mb4_roman_ci": 239,
+ "utf8mb4_persian_ci": 240,
+ "utf8mb4_esperanto_ci": 241,
+ "utf8mb4_hungarian_ci": 242,
+ "utf8mb4_sinhala_ci": 243,
+ "utf8mb4_german2_ci": 244,
+ "utf8mb4_croatian_ci": 245,
+ "utf8mb4_unicode_520_ci": 246,
+ "utf8mb4_vietnamese_ci": 247,
+ "gb18030_chinese_ci": 248,
+ "gb18030_bin": 249,
+ "gb18030_unicode_520_ci": 250,
+ "utf8mb4_0900_ai_ci": 255,
+}
+
+// A denylist of collations which are unsafe to interpolate parameters.
+// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
+var unsafeCollations = map[string]bool{
+ "big5_chinese_ci": true,
+ "sjis_japanese_ci": true,
+ "gbk_chinese_ci": true,
+ "big5_bin": true,
+ "gb2312_bin": true,
+ "gbk_bin": true,
+ "sjis_bin": true,
+ "cp932_japanese_ci": true,
+ "cp932_bin": true,
+ "gb18030_chinese_ci": true,
+ "gb18030_bin": true,
+ "gb18030_unicode_520_ci": true,
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/go-sql-driver/mysql/conncheck.go
new file mode 100644
index 00000000..0ea72172
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/conncheck.go
@@ -0,0 +1,55 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//go:build linux || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || illumos
+// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
+
+package mysql
+
+import (
+ "errors"
+ "io"
+ "net"
+ "syscall"
+)
+
+var errUnexpectedRead = errors.New("unexpected read from socket")
+
+func connCheck(conn net.Conn) error {
+ var sysErr error
+
+ sysConn, ok := conn.(syscall.Conn)
+ if !ok {
+ return nil
+ }
+ rawConn, err := sysConn.SyscallConn()
+ if err != nil {
+ return err
+ }
+
+ err = rawConn.Read(func(fd uintptr) bool {
+ var buf [1]byte
+ n, err := syscall.Read(int(fd), buf[:])
+ switch {
+ case n == 0 && err == nil:
+ sysErr = io.EOF
+ case n > 0:
+ sysErr = errUnexpectedRead
+ case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
+ sysErr = nil
+ default:
+ sysErr = err
+ }
+ return true
+ })
+ if err != nil {
+ return err
+ }
+
+ return sysErr
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
new file mode 100644
index 00000000..a56c138f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
@@ -0,0 +1,18 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//go:build !linux && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !illumos
+// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos
+
+package mysql
+
+import "net"
+
+func connCheck(conn net.Conn) error {
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
new file mode 100644
index 00000000..947a883e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -0,0 +1,650 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/json"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type mysqlConn struct {
+ buf buffer
+ netConn net.Conn
+ rawConn net.Conn // underlying connection when netConn is TLS connection.
+ affectedRows uint64
+ insertId uint64
+ cfg *Config
+ maxAllowedPacket int
+ maxWriteSize int
+ writeTimeout time.Duration
+ flags clientFlag
+ status statusFlag
+ sequence uint8
+ parseTime bool
+ reset bool // set when the Go SQL package calls ResetSession
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- context.Context
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
+}
+
+// Handles parameters set in DSN after the connection is established
+func (mc *mysqlConn) handleParams() (err error) {
+ var cmdSet strings.Builder
+ for param, val := range mc.cfg.Params {
+ switch param {
+ // Charset: character_set_connection, character_set_client, character_set_results
+ case "charset":
+ charsets := strings.Split(val, ",")
+ for i := range charsets {
+ // ignore errors here - a charset may not exist
+ err = mc.exec("SET NAMES " + charsets[i])
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return
+ }
+
+ // Other system vars accumulated in a single SET command
+ default:
+ if cmdSet.Len() == 0 {
+ // Heuristic: 29 chars for each other key=value to reduce reallocations
+ cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.WriteString("SET ")
+ } else {
+ cmdSet.WriteString(", ")
+ }
+ cmdSet.WriteString(param)
+ cmdSet.WriteString(" = ")
+ cmdSet.WriteString(val)
+ }
+ }
+
+ if cmdSet.Len() > 0 {
+ err = mc.exec(cmdSet.String())
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
+func (mc *mysqlConn) Begin() (driver.Tx, error) {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
+ if err == nil {
+ return &mysqlTx{mc}, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+func (mc *mysqlConn) Close() (err error) {
+ // Makes Close idempotent
+ if !mc.closed.Load() {
+ err = mc.writeCommandPacket(comQuit)
+ }
+
+ mc.cleanup()
+
+ return
+}
+
+// Closes the network connection and unsets internal variables. Do not call this
+// function after successful authentication; call Close instead. This function
+// is called before auth or on auth failure because MySQL will have already
+// closed the network connection.
+func (mc *mysqlConn) cleanup() {
+ if mc.closed.Swap(true) {
+ return
+ }
+
+ // Makes cleanup idempotent
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.Load() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
+ }
+ return ErrInvalidConn
+ }
+ return nil
+}
+
+func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comStmtPrepare, query)
+ if err != nil {
+ // STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
+ errLog.Print(err)
+ return nil, driver.ErrBadConn
+ }
+
+ stmt := &mysqlStmt{
+ mc: mc,
+ }
+
+ // Read Result
+ columnCount, err := stmt.readPrepareResultPacket()
+ if err == nil {
+ if stmt.paramCount > 0 {
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if columnCount > 0 {
+ err = mc.readUntilEOF()
+ }
+ }
+
+ return stmt, err
+}
+
+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+ // Number of ? should be same to len(args)
+ if strings.Count(query, "?") != len(args) {
+ return "", driver.ErrSkip
+ }
+
+ buf, err := mc.buf.takeCompleteBuffer()
+ if err != nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return "", ErrInvalidConn
+ }
+ buf = buf[:0]
+ argPos := 0
+
+ for i := 0; i < len(query); i++ {
+ q := strings.IndexByte(query[i:], '?')
+ if q == -1 {
+ buf = append(buf, query[i:]...)
+ break
+ }
+ buf = append(buf, query[i:i+q]...)
+ i += q
+
+ arg := args[argPos]
+ argPos++
+
+ if arg == nil {
+ buf = append(buf, "NULL"...)
+ continue
+ }
+
+ switch v := arg.(type) {
+ case int64:
+ buf = strconv.AppendInt(buf, v, 10)
+ case uint64:
+ // Handle uint64 explicitly because our custom ConvertValue emits unsigned values
+ buf = strconv.AppendUint(buf, v, 10)
+ case float64:
+ buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+ case bool:
+ if v {
+ buf = append(buf, '1')
+ } else {
+ buf = append(buf, '0')
+ }
+ case time.Time:
+ if v.IsZero() {
+ buf = append(buf, "'0000-00-00'"...)
+ } else {
+ buf = append(buf, '\'')
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, '\'')
+ }
+ case json.RawMessage:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ case []byte:
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, "_binary'"...)
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ }
+ case string:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeStringBackslash(buf, v)
+ } else {
+ buf = escapeStringQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ default:
+ return "", driver.ErrSkip
+ }
+
+ if len(buf)+4 > mc.maxAllowedPacket {
+ return "", driver.ErrSkip
+ }
+ }
+ if argPos != len(args) {
+ return "", driver.ErrSkip
+ }
+ return string(buf), nil
+}
+
+func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ err := mc.exec(query)
+ if err == nil {
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Internal function to execute commands
+func (mc *mysqlConn) exec(query string) error {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+
+ return mc.discardResults()
+}
+
+func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try client-side prepare to reduce roundtrip
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comQuery, query)
+ if err == nil {
+ // Read Result
+ var resLen int
+ resLen, err = mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+
+ if resLen == 0 {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ // Columns
+ rows.rs.columns, err = mc.readColumns(resLen)
+ return rows, err
+ }
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Gets the value of the given MySQL System Variable
+// The returned byte slice is only valid until the next read
+func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
+ return nil, err
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+
+ if resLen > 0 {
+ // Columns
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ dest := make([]driver.Value, resLen)
+ if err = rows.readRow(dest); err == nil {
+ return dest[0].([]byte), mc.readUntilEOF()
+ }
+ }
+ return nil, err
+}
+
+// cancel is called when the query is canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
+
+// Ping implements driver.Pinger interface
+func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err = mc.watchCancel(ctx); err != nil {
+ return
+ }
+ defer mc.finish()
+
+ if err = mc.writeCommandPacket(comPing); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ return mc.readResultOK()
+}
+
+// BeginTx implements driver.ConnBeginTx interface
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if mc.closed.Load() {
+ return nil, driver.ErrBadConn
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+ // Reach here if canceled,
+ // so the connection is already invalid
+ mc.cleanup()
+ return nil
+ }
+ // When ctx is already cancelled, don't watch it.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ // When ctx is not cancellable, don't watch it.
+ if ctx.Done() == nil {
+ return nil
+ }
+ // When watcher is not alive, can't watch it.
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watching = true
+ mc.watcher <- ctx
+ return nil
+}
+
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan context.Context, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
+ go func() {
+ for {
+ var ctx context.Context
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
+
+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+// ResetSession implements driver.SessionResetter.
+// (From Go 1.10)
+func (mc *mysqlConn) ResetSession(ctx context.Context) error {
+ if mc.closed.Load() {
+ return driver.ErrBadConn
+ }
+ mc.reset = true
+ return nil
+}
+
+// IsValid implements driver.Validator interface
+// (From Go 1.15)
+func (mc *mysqlConn) IsValid() bool {
+ return !mc.closed.Load()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go
new file mode 100644
index 00000000..d567b4e4
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connector.go
@@ -0,0 +1,146 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "context"
+ "database/sql/driver"
+ "net"
+)
+
+type connector struct {
+ cfg *Config // immutable private copy.
+}
+
+// Connect implements driver.Connector interface.
+// Connect returns a connection to the database.
+func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
+ var err error
+
+ // New mysqlConn
+ mc := &mysqlConn{
+ maxAllowedPacket: maxPacketSize,
+ maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
+ cfg: c.cfg,
+ }
+ mc.parseTime = mc.cfg.ParseTime
+
+ // Connect to Server
+ dialsLock.RLock()
+ dial, ok := dials[mc.cfg.Net]
+ dialsLock.RUnlock()
+ if ok {
+ dctx := ctx
+ if mc.cfg.Timeout > 0 {
+ var cancel context.CancelFunc
+ dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
+ defer cancel()
+ }
+ mc.netConn, err = dial(dctx, mc.cfg.Addr)
+ } else {
+ nd := net.Dialer{Timeout: mc.cfg.Timeout}
+ mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Enable TCP Keepalives on TCP connections
+ if tc, ok := mc.netConn.(*net.TCPConn); ok {
+ if err := tc.SetKeepAlive(true); err != nil {
+ // Don't send COM_QUIT before handshake.
+ mc.netConn.Close()
+ mc.netConn = nil
+ return nil, err
+ }
+ }
+
+ // Call startWatcher for context support (From Go 1.8)
+ mc.startWatcher()
+ if err := mc.watchCancel(ctx); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ defer mc.finish()
+
+ mc.buf = newBuffer(mc.netConn)
+
+ // Set I/O timeouts
+ mc.buf.timeout = mc.cfg.ReadTimeout
+ mc.writeTimeout = mc.cfg.WriteTimeout
+
+ // Reading Handshake Initialization Packet
+ authData, plugin, err := mc.readHandshakePacket()
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ if plugin == "" {
+ plugin = defaultAuthPlugin
+ }
+
+ // Send Client Authentication Packet
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ // try the default auth plugin, if using the requested plugin failed
+ errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ plugin = defaultAuthPlugin
+ authResp, err = mc.auth(authData, plugin)
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ }
+ if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ // Handle response to auth packet, switch methods if possible
+ if err = mc.handleAuthResult(authData, plugin); err != nil {
+ // Authentication failed and MySQL has already closed the connection
+ // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
+ // Do not send COM_QUIT, just cleanup and return the error.
+ mc.cleanup()
+ return nil, err
+ }
+
+ if mc.cfg.MaxAllowedPacket > 0 {
+ mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+ } else {
+ // Get max allowed packet size
+ maxap, err := mc.getSystemVar("max_allowed_packet")
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+ mc.maxAllowedPacket = stringToInt(maxap) - 1
+ }
+ if mc.maxAllowedPacket < maxPacketSize {
+ mc.maxWriteSize = mc.maxAllowedPacket
+ }
+
+ // Handle DSN Params
+ err = mc.handleParams()
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+
+ return mc, nil
+}
+
+// Driver implements driver.Connector interface.
+// Driver returns &MySQLDriver{}.
+func (c *connector) Driver() driver.Driver {
+ return &MySQLDriver{}
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
new file mode 100644
index 00000000..64e2bced
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -0,0 +1,174 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const (
+ defaultAuthPlugin = "mysql_native_password"
+ defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355
+ minProtocolVersion = 10
+ maxPacketSize = 1<<24 - 1
+ timeFormat = "2006-01-02 15:04:05.999999"
+)
+
+// MySQL constants documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+const (
+ iOK byte = 0x00
+ iAuthMoreData byte = 0x01
+ iLocalInFile byte = 0xfb
+ iEOF byte = 0xfe
+ iERR byte = 0xff
+)
+
+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
+type clientFlag uint32
+
+const (
+ clientLongPassword clientFlag = 1 << iota
+ clientFoundRows
+ clientLongFlag
+ clientConnectWithDB
+ clientNoSchema
+ clientCompress
+ clientODBC
+ clientLocalFiles
+ clientIgnoreSpace
+ clientProtocol41
+ clientInteractive
+ clientSSL
+ clientIgnoreSIGPIPE
+ clientTransactions
+ clientReserved
+ clientSecureConn
+ clientMultiStatements
+ clientMultiResults
+ clientPSMultiResults
+ clientPluginAuth
+ clientConnectAttrs
+ clientPluginAuthLenEncClientData
+ clientCanHandleExpiredPasswords
+ clientSessionTrack
+ clientDeprecateEOF
+)
+
+const (
+ comQuit byte = iota + 1
+ comInitDB
+ comQuery
+ comFieldList
+ comCreateDB
+ comDropDB
+ comRefresh
+ comShutdown
+ comStatistics
+ comProcessInfo
+ comConnect
+ comProcessKill
+ comDebug
+ comPing
+ comTime
+ comDelayedInsert
+ comChangeUser
+ comBinlogDump
+ comTableDump
+ comConnectOut
+ comRegisterSlave
+ comStmtPrepare
+ comStmtExecute
+ comStmtSendLongData
+ comStmtClose
+ comStmtReset
+ comSetOption
+ comStmtFetch
+)
+
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
+const (
+ fieldTypeDecimal fieldType = iota
+ fieldTypeTiny
+ fieldTypeShort
+ fieldTypeLong
+ fieldTypeFloat
+ fieldTypeDouble
+ fieldTypeNULL
+ fieldTypeTimestamp
+ fieldTypeLongLong
+ fieldTypeInt24
+ fieldTypeDate
+ fieldTypeTime
+ fieldTypeDateTime
+ fieldTypeYear
+ fieldTypeNewDate
+ fieldTypeVarChar
+ fieldTypeBit
+)
+const (
+ fieldTypeJSON fieldType = iota + 0xf5
+ fieldTypeNewDecimal
+ fieldTypeEnum
+ fieldTypeSet
+ fieldTypeTinyBLOB
+ fieldTypeMediumBLOB
+ fieldTypeLongBLOB
+ fieldTypeBLOB
+ fieldTypeVarString
+ fieldTypeString
+ fieldTypeGeometry
+)
+
+type fieldFlag uint16
+
+const (
+ flagNotNULL fieldFlag = 1 << iota
+ flagPriKey
+ flagUniqueKey
+ flagMultipleKey
+ flagBLOB
+ flagUnsigned
+ flagZeroFill
+ flagBinary
+ flagEnum
+ flagAutoIncrement
+ flagTimestamp
+ flagSet
+ flagUnknown1
+ flagUnknown2
+ flagUnknown3
+ flagUnknown4
+)
+
+// http://dev.mysql.com/doc/internals/en/status-flags.html
+type statusFlag uint16
+
+const (
+ statusInTrans statusFlag = 1 << iota
+ statusInAutocommit
+ statusReserved // Not in documentation
+ statusMoreResultsExists
+ statusNoGoodIndexUsed
+ statusNoIndexUsed
+ statusCursorExists
+ statusLastRowSent
+ statusDbDropped
+ statusNoBackslashEscapes
+ statusMetadataChanged
+ statusQueryWasSlow
+ statusPsOutParams
+ statusInTransReadonly
+ statusSessionStateChanged
+)
+
+const (
+ cachingSha2PasswordRequestPublicKey = 2
+ cachingSha2PasswordFastAuthSuccess = 3
+ cachingSha2PasswordPerformFullAuthentication = 4
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
new file mode 100644
index 00000000..ad7aec21
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -0,0 +1,107 @@
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// Package mysql provides a MySQL driver for Go's database/sql package.
+//
+// The driver should be used via the database/sql package:
+//
+// import "database/sql"
+// import _ "github.com/go-sql-driver/mysql"
+//
+// db, err := sql.Open("mysql", "user:password@/dbname")
+//
+// See https://github.com/go-sql-driver/mysql#usage for details
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "net"
+ "sync"
+)
+
+// MySQLDriver is exported to make the driver directly accessible.
+// In general the driver is used via the database/sql package.
+type MySQLDriver struct{}
+
+// DialFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDial
+//
+// Deprecated: users should register a DialContextFunc instead
+type DialFunc func(addr string) (net.Conn, error)
+
+// DialContextFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDialContext
+type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error)
+
+var (
+ dialsLock sync.RWMutex
+ dials map[string]DialContextFunc
+)
+
+// RegisterDialContext registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// The current context for the connection and its address is passed to the dial function.
+func RegisterDialContext(net string, dial DialContextFunc) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials == nil {
+ dials = make(map[string]DialContextFunc)
+ }
+ dials[net] = dial
+}
+
+// RegisterDial registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// addr is passed as a parameter to the dial function.
+//
+// Deprecated: users should call RegisterDialContext instead
+func RegisterDial(network string, dial DialFunc) {
+ RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) {
+ return dial(addr)
+ })
+}
+
+// Open new Connection.
+// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
+// the DSN string is formatted
+func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
+ cfg, err := ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ c := &connector{
+ cfg: cfg,
+ }
+ return c.Connect(context.Background())
+}
+
+func init() {
+ sql.Register("mysql", &MySQLDriver{})
+}
+
+// NewConnector returns new driver.Connector.
+func NewConnector(cfg *Config) (driver.Connector, error) {
+ cfg = cfg.Clone()
+ // normalize the contents of cfg so calls to NewConnector have the same
+ // behavior as MySQLDriver.OpenConnector
+ if err := cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return &connector{cfg: cfg}, nil
+}
+
+// OpenConnector implements driver.DriverContext.
+func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
+ cfg, err := ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ return &connector{
+ cfg: cfg,
+ }, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
new file mode 100644
index 00000000..4b71aaab
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -0,0 +1,577 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
+)
+
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
+type Config struct {
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network type
+ Addr string // Network address (requires Net)
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ pubKey *rsa.PublicKey // Server public key
+ TLSConfig string // TLS configuration name
+ TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+
+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowFallbackToPlaintext bool // Allows fallback to unencrypted connection if server does not support TLS
+ AllowNativePasswords bool // Allows the native password authentication method
+ AllowOldPasswords bool // Allows the old insecure password method
+ CheckConnLiveness bool // Check connections for liveness before using them
+ ClientFoundRows bool // Return number of matching rows instead of rows changed
+ ColumnsWithAlias bool // Prepend table alias to column names
+ InterpolateParams bool // Interpolate placeholders into query string
+ MultiStatements bool // Allow multiple statements in one query
+ ParseTime bool // Parse time values to time.Time
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ CheckConnLiveness: true,
+ }
+}
+
+func (cfg *Config) Clone() *Config {
+ cp := *cfg
+ if cp.TLS != nil {
+ cp.TLS = cfg.TLS.Clone()
+ }
+ if len(cp.Params) > 0 {
+ cp.Params = make(map[string]string, len(cfg.Params))
+ for k, v := range cfg.Params {
+ cp.Params[k] = v
+ }
+ }
+ if cfg.pubKey != nil {
+ cp.pubKey = &rsa.PublicKey{
+ N: new(big.Int).Set(cfg.pubKey.N),
+ E: cfg.pubKey.E,
+ }
+ }
+ return &cp
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ if cfg.TLS == nil {
+ switch cfg.TLSConfig {
+ case "false", "":
+ // don't set anything
+ case "true":
+ cfg.TLS = &tls.Config{}
+ case "skip-verify":
+ cfg.TLS = &tls.Config{InsecureSkipVerify: true}
+ case "preferred":
+ cfg.TLS = &tls.Config{InsecureSkipVerify: true}
+ cfg.AllowFallbackToPlaintext = true
+ default:
+ cfg.TLS = getTLSConfigClone(cfg.TLSConfig)
+ if cfg.TLS == nil {
+ return errors.New("invalid value / unknown config name: " + cfg.TLSConfig)
+ }
+ }
+ }
+
+ if cfg.TLS != nil && cfg.TLS.ServerName == "" && !cfg.TLS.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.TLS.ServerName = host
+ }
+ }
+
+ if cfg.ServerPubKey != "" {
+ cfg.pubKey = getServerPubKey(cfg.ServerPubKey)
+ if cfg.pubKey == nil {
+ return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey)
+ }
+ }
+
+ return nil
+}
+
+func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
+ buf.Grow(1 + len(name) + 1 + len(value))
+ if !*hasParam {
+ *hasParam = true
+ buf.WriteByte('?')
+ } else {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(name)
+ buf.WriteByte('=')
+ buf.WriteString(value)
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
+func (cfg *Config) FormatDSN() string {
+ var buf bytes.Buffer
+
+ // [username[:password]@]
+ if len(cfg.User) > 0 {
+ buf.WriteString(cfg.User)
+ if len(cfg.Passwd) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(cfg.Passwd)
+ }
+ buf.WriteByte('@')
+ }
+
+ // [protocol[(address)]]
+ if len(cfg.Net) > 0 {
+ buf.WriteString(cfg.Net)
+ if len(cfg.Addr) > 0 {
+ buf.WriteByte('(')
+ buf.WriteString(cfg.Addr)
+ buf.WriteByte(')')
+ }
+ }
+
+ // /dbname
+ buf.WriteByte('/')
+ buf.WriteString(cfg.DBName)
+
+ // [?param1=value1&...¶mN=valueN]
+ hasParam := false
+
+ if cfg.AllowAllFiles {
+ hasParam = true
+ buf.WriteString("?allowAllFiles=true")
+ }
+
+ if cfg.AllowCleartextPasswords {
+ writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true")
+ }
+
+ if cfg.AllowFallbackToPlaintext {
+ writeDSNParam(&buf, &hasParam, "allowFallbackToPlaintext", "true")
+ }
+
+ if !cfg.AllowNativePasswords {
+ writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false")
+ }
+
+ if cfg.AllowOldPasswords {
+ writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true")
+ }
+
+ if !cfg.CheckConnLiveness {
+ writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false")
+ }
+
+ if cfg.ClientFoundRows {
+ writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
+ }
+
+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ writeDSNParam(&buf, &hasParam, "collation", col)
+ }
+
+ if cfg.ColumnsWithAlias {
+ writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
+ }
+
+ if cfg.InterpolateParams {
+ writeDSNParam(&buf, &hasParam, "interpolateParams", "true")
+ }
+
+ if cfg.Loc != time.UTC && cfg.Loc != nil {
+ writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String()))
+ }
+
+ if cfg.MultiStatements {
+ writeDSNParam(&buf, &hasParam, "multiStatements", "true")
+ }
+
+ if cfg.ParseTime {
+ writeDSNParam(&buf, &hasParam, "parseTime", "true")
+ }
+
+ if cfg.ReadTimeout > 0 {
+ writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
+ }
+
+ if cfg.RejectReadOnly {
+ writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true")
+ }
+
+ if len(cfg.ServerPubKey) > 0 {
+ writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey))
+ }
+
+ if cfg.Timeout > 0 {
+ writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String())
+ }
+
+ if len(cfg.TLSConfig) > 0 {
+ writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig))
+ }
+
+ if cfg.WriteTimeout > 0 {
+ writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String())
+ }
+
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
+ writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket))
+ }
+
+ // other params
+ if cfg.Params != nil {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
+ writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param]))
+ }
+ }
+
+ return buf.String()
+}
+
+// ParseDSN parses the DSN string to a Config
+func ParseDSN(dsn string) (cfg *Config, err error) {
+ // New config with some default values
+ cfg = NewConfig()
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...¶mN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' {
+ if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
+ return
+ }
+ break
+ }
+ }
+ cfg.DBName = dsn[i+1 : j]
+
+ break
+ }
+ }
+
+ if !foundSlash && len(dsn) > 0 {
+ return nil, errInvalidDSNNoSlash
+ }
+
+ if err = cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return
+}
+
+// parseDSNParams parses the DSN "query string"
+// Values must be url.QueryEscape'ed
+func parseDSNParams(cfg *Config, params string) (err error) {
+ for _, v := range strings.Split(params, "&") {
+ param := strings.SplitN(v, "=", 2)
+ if len(param) != 2 {
+ continue
+ }
+
+ // cfg params
+ switch value := param[1]; param[0] {
+ // Disable INFILE allowlist / enable all files
+ case "allowAllFiles":
+ var isBool bool
+ cfg.AllowAllFiles, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use cleartext authentication mode (MySQL 5.5.10+)
+ case "allowCleartextPasswords":
+ var isBool bool
+ cfg.AllowCleartextPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Allow fallback to unencrypted connection if server does not support TLS
+ case "allowFallbackToPlaintext":
+ var isBool bool
+ cfg.AllowFallbackToPlaintext, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use native password authentication
+ case "allowNativePasswords":
+ var isBool bool
+ cfg.AllowNativePasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use old authentication mode (pre MySQL 4.1)
+ case "allowOldPasswords":
+ var isBool bool
+ cfg.AllowOldPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Check connections for Liveness before using them
+ case "checkConnLiveness":
+ var isBool bool
+ cfg.CheckConnLiveness, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Switch "rowsAffected" mode
+ case "clientFoundRows":
+ var isBool bool
+ cfg.ClientFoundRows, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Collation
+ case "collation":
+ cfg.Collation = value
+
+ case "columnsWithAlias":
+ var isBool bool
+ cfg.ColumnsWithAlias, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Compression
+ case "compress":
+ return errors.New("compression not implemented yet")
+
+ // Enable client side placeholder substitution
+ case "interpolateParams":
+ var isBool bool
+ cfg.InterpolateParams, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Time Location
+ case "loc":
+ if value, err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ cfg.Loc, err = time.LoadLocation(value)
+ if err != nil {
+ return
+ }
+
+ // multiple statements in one query
+ case "multiStatements":
+ var isBool bool
+ cfg.MultiStatements, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // time.Time parsing
+ case "parseTime":
+ var isBool bool
+ cfg.ParseTime, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // I/O read Timeout
+ case "readTimeout":
+ cfg.ReadTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // Reject read-only connections
+ case "rejectReadOnly":
+ var isBool bool
+ cfg.RejectReadOnly, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Server public key
+ case "serverPubKey":
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for server pub key name: %v", err)
+ }
+ cfg.ServerPubKey = name
+
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
+ // Dial Timeout
+ case "timeout":
+ cfg.Timeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // TLS-Encryption
+ case "tls":
+ boolValue, isBool := readBool(value)
+ if isBool {
+ if boolValue {
+ cfg.TLSConfig = "true"
+ } else {
+ cfg.TLSConfig = "false"
+ }
+ } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" {
+ cfg.TLSConfig = vl
+ } else {
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for TLS config name: %v", err)
+ }
+ cfg.TLSConfig = name
+ }
+
+ // I/O write Timeout
+ case "writeTimeout":
+ cfg.WriteTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+ case "maxAllowedPacket":
+ cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+ if err != nil {
+ return
+ }
+ default:
+ // lazy init
+ if cfg.Params == nil {
+ cfg.Params = make(map[string]string)
+ }
+
+ if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
new file mode 100644
index 00000000..ff9a8f08
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -0,0 +1,77 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "os"
+)
+
+// Various errors the driver might return. Can change between driver versions.
+var (
+ ErrInvalidConn = errors.New("invalid connection")
+ ErrMalformPkt = errors.New("malformed packet")
+ ErrNoTLS = errors.New("TLS requested but server does not support TLS")
+ ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
+ ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
+ ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
+ ErrPktSync = errors.New("commands out of sync. You can't run this command now")
+ ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the `Config.MaxAllowedPacket`")
+ ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
+)
+
+var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+
+// Logger is used to log critical error messages.
+type Logger interface {
+ Print(v ...interface{})
+}
+
+// SetLogger is used to set the logger for critical errors.
+// The initial logger is os.Stderr.
+func SetLogger(logger Logger) error {
+ if logger == nil {
+ return errors.New("logger is nil")
+ }
+ errLog = logger
+ return nil
+}
+
+// MySQLError is an error type which represents a single MySQL error
+type MySQLError struct {
+ Number uint16
+ SQLState [5]byte
+ Message string
+}
+
+func (me *MySQLError) Error() string {
+ if me.SQLState != [5]byte{} {
+ return fmt.Sprintf("Error %d (%s): %s", me.Number, me.SQLState, me.Message)
+ }
+
+ return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
+}
+
+func (me *MySQLError) Is(err error) bool {
+ if merr, ok := err.(*MySQLError); ok {
+ return merr.Number == me.Number
+ }
+ return false
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 00000000..e0654a83
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,206 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+func (mf *mysqlField) typeDatabaseName() string {
+ switch mf.fieldType {
+ case fieldTypeBit:
+ return "BIT"
+ case fieldTypeBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TEXT"
+ }
+ return "BLOB"
+ case fieldTypeDate:
+ return "DATE"
+ case fieldTypeDateTime:
+ return "DATETIME"
+ case fieldTypeDecimal:
+ return "DECIMAL"
+ case fieldTypeDouble:
+ return "DOUBLE"
+ case fieldTypeEnum:
+ return "ENUM"
+ case fieldTypeFloat:
+ return "FLOAT"
+ case fieldTypeGeometry:
+ return "GEOMETRY"
+ case fieldTypeInt24:
+ return "MEDIUMINT"
+ case fieldTypeJSON:
+ return "JSON"
+ case fieldTypeLong:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED INT"
+ }
+ return "INT"
+ case fieldTypeLongBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "LONGTEXT"
+ }
+ return "LONGBLOB"
+ case fieldTypeLongLong:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED BIGINT"
+ }
+ return "BIGINT"
+ case fieldTypeMediumBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "MEDIUMTEXT"
+ }
+ return "MEDIUMBLOB"
+ case fieldTypeNewDate:
+ return "DATE"
+ case fieldTypeNewDecimal:
+ return "DECIMAL"
+ case fieldTypeNULL:
+ return "NULL"
+ case fieldTypeSet:
+ return "SET"
+ case fieldTypeShort:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED SMALLINT"
+ }
+ return "SMALLINT"
+ case fieldTypeString:
+ if mf.charSet == collations[binaryCollation] {
+ return "BINARY"
+ }
+ return "CHAR"
+ case fieldTypeTime:
+ return "TIME"
+ case fieldTypeTimestamp:
+ return "TIMESTAMP"
+ case fieldTypeTiny:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED TINYINT"
+ }
+ return "TINYINT"
+ case fieldTypeTinyBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TINYTEXT"
+ }
+ return "TINYBLOB"
+ case fieldTypeVarChar:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeVarString:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeYear:
+ return "YEAR"
+ default:
+ return ""
+ }
+}
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+ charSet uint8
+}
+
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior as it can
+ // handle both cases of parseTime regardless if the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/fuzz.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go
new file mode 100644
index 00000000..3a4ec25a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fuzz.go
@@ -0,0 +1,25 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
+//
+// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//go:build gofuzz
+// +build gofuzz
+
+package mysql
+
+import (
+ "database/sql"
+)
+
+func Fuzz(data []byte) int {
+ db, err := sql.Open("mysql", string(data))
+ if err != nil {
+ return 0
+ }
+ db.Close()
+ return 1
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
new file mode 100644
index 00000000..3279dcff
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ fileRegister map[string]bool
+ fileRegisterLock sync.RWMutex
+ readerRegister map[string]func() io.Reader
+ readerRegisterLock sync.RWMutex
+)
+
+// RegisterLocalFile adds the given file to the file allowlist,
+// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
+// Alternatively you can allow the use of all local files with
+// the DSN parameter 'allowAllFiles=true'
+//
+// filePath := "/home/gopher/data.csv"
+// mysql.RegisterLocalFile(filePath)
+// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
+// if err != nil {
+// ...
+func RegisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ // lazy map init
+ if fileRegister == nil {
+ fileRegister = make(map[string]bool)
+ }
+
+ fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegisterLock.Unlock()
+}
+
+// DeregisterLocalFile removes the given filepath from the allowlist.
+func DeregisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ delete(fileRegister, strings.Trim(filePath, `"`))
+ fileRegisterLock.Unlock()
+}
+
+// RegisterReaderHandler registers a handler function which is used
+// to receive a io.Reader.
+// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
+// If the handler returns a io.ReadCloser Close() is called when the
+// request is finished.
+//
+// mysql.RegisterReaderHandler("data", func() io.Reader {
+// var csvReader io.Reader // Some Reader that returns CSV data
+// ... // Open Reader here
+// return csvReader
+// })
+// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
+// if err != nil {
+// ...
+func RegisterReaderHandler(name string, handler func() io.Reader) {
+ readerRegisterLock.Lock()
+ // lazy map init
+ if readerRegister == nil {
+ readerRegister = make(map[string]func() io.Reader)
+ }
+
+ readerRegister[name] = handler
+ readerRegisterLock.Unlock()
+}
+
+// DeregisterReaderHandler removes the ReaderHandler function with
+// the given name from the registry.
+func DeregisterReaderHandler(name string) {
+ readerRegisterLock.Lock()
+ delete(readerRegister, name)
+ readerRegisterLock.Unlock()
+}
+
+func deferredClose(err *error, closer io.Closer) {
+ closeErr := closer.Close()
+ if *err == nil {
+ *err = closeErr
+ }
+}
+
+const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
+
+func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+ var rdr io.Reader
+ var data []byte
+ packetSize := defaultPacketSize
+ if mc.maxWriteSize < packetSize {
+ packetSize = mc.maxWriteSize
+ }
+
+ if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
+		// The server might return an absolute path. See issue #355.
+ name = name[idx+8:]
+
+ readerRegisterLock.RLock()
+ handler, inMap := readerRegister[name]
+ readerRegisterLock.RUnlock()
+
+ if inMap {
+ rdr = handler()
+ if rdr != nil {
+ if cl, ok := rdr.(io.Closer); ok {
+ defer deferredClose(&err, cl)
+ }
+ } else {
+			err = fmt.Errorf("Reader '%s' is <nil>", name)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is not registered", name)
+ }
+ } else { // File
+ name = strings.Trim(name, `"`)
+ fileRegisterLock.RLock()
+ fr := fileRegister[name]
+ fileRegisterLock.RUnlock()
+ if mc.cfg.AllowAllFiles || fr {
+ var file *os.File
+ var fi os.FileInfo
+
+ if file, err = os.Open(name); err == nil {
+ defer deferredClose(&err, file)
+
+ // get file size
+ if fi, err = file.Stat(); err == nil {
+ rdr = file
+ if fileSize := int(fi.Size()); fileSize < packetSize {
+ packetSize = fileSize
+ }
+ }
+ }
+ } else {
+ err = fmt.Errorf("local file '%s' is not registered", name)
+ }
+ }
+
+ // send content packets
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
+ data := make([]byte, 4+packetSize)
+ var n int
+ for err == nil {
+ n, err = rdr.Read(data[4:])
+ if n > 0 {
+ if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ return ioErr
+ }
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ }
+
+ // send empty packet (termination)
+ if data == nil {
+ data = make([]byte, 4)
+ }
+ if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ return ioErr
+ }
+
+ // read OK packet
+ if err == nil {
+ return mc.readResultOK()
+ }
+
+ mc.readPacket()
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
new file mode 100644
index 00000000..36c8a42c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
@@ -0,0 +1,71 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "fmt"
+ "time"
+)
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+// var nt NullTime
+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+// ...
+// if nt.Valid {
+// // use nt.Time
+// } else {
+// // NULL value
+// }
+//
+// # This NullTime implementation is not driver-specific
+//
+// Deprecated: NullTime doesn't honor the loc DSN parameter.
+// NullTime.Scan interprets a time as UTC, not the loc DSN parameter.
+// Use sql.NullTime instead.
+type NullTime sql.NullTime
+
+// Scan implements the Scanner interface.
+// The value type must be time.Time or string / []byte (formatted time-string),
+// otherwise Scan fails.
+func (nt *NullTime) Scan(value interface{}) (err error) {
+ if value == nil {
+ nt.Time, nt.Valid = time.Time{}, false
+ return
+ }
+
+ switch v := value.(type) {
+ case time.Time:
+ nt.Time, nt.Valid = v, true
+ return
+ case []byte:
+ nt.Time, err = parseDateTime(v, time.UTC)
+ nt.Valid = (err == nil)
+ return
+ case string:
+ nt.Time, err = parseDateTime([]byte(v), time.UTC)
+ nt.Valid = (err == nil)
+ return
+ }
+
+ nt.Valid = false
+ return fmt.Errorf("Can't convert %T to time.Time", value)
+}
+
+// Value implements the driver Valuer interface.
+func (nt NullTime) Value() (driver.Value, error) {
+ if !nt.Valid {
+ return nil, nil
+ }
+ return nt.Time, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
new file mode 100644
index 00000000..ee05c95a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -0,0 +1,1349 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// Packets documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+// Read packet to buffer 'data'
+func (mc *mysqlConn) readPacket() ([]byte, error) {
+ var prevData []byte
+ for {
+ // read packet header
+ data, err := mc.buf.readNext(4)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // packet length [24 bit]
+ pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+
+ // check packet sync [8 bit]
+ if data[3] != mc.sequence {
+ if data[3] > mc.sequence {
+ return nil, ErrPktSyncMul
+ }
+ return nil, ErrPktSync
+ }
+ mc.sequence++
+
+ // packets with length 0 terminate a previous packet which is a
+ // multiple of (2^24)-1 bytes long
+ if pktLen == 0 {
+ // there was no previous packet
+ if prevData == nil {
+ errLog.Print(ErrMalformPkt)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ return prevData, nil
+ }
+
+ // read packet body [pktLen bytes]
+ data, err = mc.buf.readNext(pktLen)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // return data if this was the last packet
+ if pktLen < maxPacketSize {
+ // zero allocations for non-split packets
+ if prevData == nil {
+ return data, nil
+ }
+
+ return append(prevData, data...), nil
+ }
+
+ prevData = append(prevData, data...)
+ }
+}
+
+// Write packet buffer 'data'
+func (mc *mysqlConn) writePacket(data []byte) error {
+ pktLen := len(data) - 4
+
+ if pktLen > mc.maxAllowedPacket {
+ return ErrPktTooLarge
+ }
+
+ // Perform a stale connection check. We only perform this check for
+ // the first query on a connection that has been checked out of the
+ // connection pool: a fresh connection from the pool is more likely
+ // to be stale, and it has not performed any previous writes that
+ // could cause data corruption, so it's safe to return ErrBadConn
+ // if the check fails.
+ if mc.reset {
+ mc.reset = false
+ conn := mc.netConn
+ if mc.rawConn != nil {
+ conn = mc.rawConn
+ }
+ var err error
+ if mc.cfg.CheckConnLiveness {
+ if mc.cfg.ReadTimeout != 0 {
+ err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
+ }
+ if err == nil {
+ err = connCheck(conn)
+ }
+ }
+ if err != nil {
+ errLog.Print("closing bad idle connection: ", err)
+ mc.Close()
+ return driver.ErrBadConn
+ }
+ }
+
+ for {
+ var size int
+ if pktLen >= maxPacketSize {
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+ size = maxPacketSize
+ } else {
+ data[0] = byte(pktLen)
+ data[1] = byte(pktLen >> 8)
+ data[2] = byte(pktLen >> 16)
+ size = pktLen
+ }
+ data[3] = mc.sequence
+
+ // Write packet
+ if mc.writeTimeout > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+ return err
+ }
+ }
+
+ n, err := mc.netConn.Write(data[:4+size])
+ if err == nil && n == 4+size {
+ mc.sequence++
+ if size != maxPacketSize {
+ return nil
+ }
+ pktLen -= size
+ data = data[size:]
+ continue
+ }
+
+ // Handle error
+ if err == nil { // n != len(data)
+ mc.cleanup()
+ errLog.Print(ErrMalformPkt)
+ } else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
+ errLog.Print(err)
+ }
+ return ErrInvalidConn
+ }
+}
+
+/******************************************************************************
+* Initialization Process *
+******************************************************************************/
+
+// Handshake Initialization Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
+func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
+ data, err = mc.readPacket()
+ if err != nil {
+ // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
+ // in connection initialization we don't risk retrying non-idempotent actions.
+ if err == ErrInvalidConn {
+ return nil, "", driver.ErrBadConn
+ }
+ return
+ }
+
+ if data[0] == iERR {
+ return nil, "", mc.handleErrorPacket(data)
+ }
+
+ // protocol version [1 byte]
+ if data[0] < minProtocolVersion {
+ return nil, "", fmt.Errorf(
+ "unsupported protocol version %d. Version %d or higher is required",
+ data[0],
+ minProtocolVersion,
+ )
+ }
+
+ // server version [null terminated string]
+ // connection id [4 bytes]
+ pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
+
+ // first part of the password cipher [8 bytes]
+ authData := data[pos : pos+8]
+
+ // (filler) always 0x00 [1 byte]
+ pos += 8 + 1
+
+ // capability flags (lower 2 bytes) [2 bytes]
+ mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ if mc.flags&clientProtocol41 == 0 {
+ return nil, "", ErrOldProtocol
+ }
+ if mc.flags&clientSSL == 0 && mc.cfg.TLS != nil {
+ if mc.cfg.AllowFallbackToPlaintext {
+ mc.cfg.TLS = nil
+ } else {
+ return nil, "", ErrNoTLS
+ }
+ }
+ pos += 2
+
+ if len(data) > pos {
+ // character set [1 byte]
+ // status flags [2 bytes]
+ // capability flags (upper 2 bytes) [2 bytes]
+ // length of auth-plugin-data [1 byte]
+ // reserved (all [00]) [10 bytes]
+ pos += 1 + 2 + 2 + 1 + 10
+
+		// second part of the password cipher [minimum 13 bytes],
+ // where len=MAX(13, length of auth-plugin-data - 8)
+ //
+ // The web documentation is ambiguous about the length. However,
+ // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
+ // the 13th byte is "\0 byte, terminating the second part of
+ // a scramble". So the second part of the password cipher is
+ // a NULL terminated string that's at least 13 bytes with the
+ // last byte being NULL.
+ //
+ // The official Python library uses the fixed length 12
+ // which seems to work but technically could have a hidden bug.
+ authData = append(authData, data[pos:pos+12]...)
+ pos += 13
+
+ // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
+ // \NUL otherwise
+ if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
+ plugin = string(data[pos : pos+end])
+ } else {
+ plugin = string(data[pos:])
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [20]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [8]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+}
+
+// Client Authentication Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
+func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
+ // Adjust client flags based on server support
+ clientFlags := clientProtocol41 |
+ clientSecureConn |
+ clientLongPassword |
+ clientTransactions |
+ clientLocalFiles |
+ clientPluginAuth |
+ clientMultiResults |
+ mc.flags&clientLongFlag
+
+ if mc.cfg.ClientFoundRows {
+ clientFlags |= clientFoundRows
+ }
+
+ // To enable TLS / SSL
+ if mc.cfg.TLS != nil {
+ clientFlags |= clientSSL
+ }
+
+ if mc.cfg.MultiStatements {
+ clientFlags |= clientMultiStatements
+ }
+
+ // encode length of the auth plugin data
+ var authRespLEIBuf [9]byte
+ authRespLen := len(authResp)
+ authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
+ if len(authRespLEI) > 1 {
+ // if the length can not be written in 1 byte, it must be written as a
+ // length encoded integer
+ clientFlags |= clientPluginAuthLenEncClientData
+ }
+
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
+
+ // To specify a db name
+ if n := len(mc.cfg.DBName); n > 0 {
+ clientFlags |= clientConnectWithDB
+ pktLen += n + 1
+ }
+
+ // Calculate packet length and get buffer with that size
+ data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // ClientFlags [32 bit]
+ data[4] = byte(clientFlags)
+ data[5] = byte(clientFlags >> 8)
+ data[6] = byte(clientFlags >> 16)
+ data[7] = byte(clientFlags >> 24)
+
+ // MaxPacketSize [32 bit] (none)
+ data[8] = 0x00
+ data[9] = 0x00
+ data[10] = 0x00
+ data[11] = 0x00
+
+ // Charset [1 byte]
+ var found bool
+ data[12], found = collations[mc.cfg.Collation]
+ if !found {
+ // Note possibility for false negatives:
+ // could be triggered although the collation is valid if the
+ // collations map does not contain entries the server supports.
+ return errors.New("unknown collation")
+ }
+
+ // Filler [23 bytes] (all 0x00)
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
+
+ // SSL Connection Request Packet
+ // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
+ if mc.cfg.TLS != nil {
+ // Send TLS / SSL request packet
+ if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
+ return err
+ }
+
+ // Switch to TLS
+ tlsConn := tls.Client(mc.netConn, mc.cfg.TLS)
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ mc.rawConn = mc.netConn
+ mc.netConn = tlsConn
+ mc.buf.nc = tlsConn
+ }
+
+ // User [null terminated string]
+ if len(mc.cfg.User) > 0 {
+ pos += copy(data[pos:], mc.cfg.User)
+ }
+ data[pos] = 0x00
+ pos++
+
+ // Auth Data [length encoded integer]
+ pos += copy(data[pos:], authRespLEI)
+ pos += copy(data[pos:], authResp)
+
+ // Databasename [null terminated string]
+ if len(mc.cfg.DBName) > 0 {
+ pos += copy(data[pos:], mc.cfg.DBName)
+ data[pos] = 0x00
+ pos++
+ }
+
+ pos += copy(data[pos:], plugin)
+ data[pos] = 0x00
+ pos++
+
+ // Send Auth packet
+ return mc.writePacket(data[:pos])
+}
+
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
+ pktLen := 4 + len(authData)
+ data, err := mc.buf.takeSmallBuffer(pktLen)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add the auth data [EOF]
+ copy(data[4:], authData)
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Command Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) writeCommandPacket(command byte) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data, err := mc.buf.takeSmallBuffer(4 + 1)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ pktLen := 1 + len(arg)
+ data, err := mc.buf.takeBuffer(pktLen + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg
+ copy(data[5:], arg)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg [32 bit]
+ data[5] = byte(arg)
+ data[6] = byte(arg >> 8)
+ data[7] = byte(arg >> 16)
+ data[8] = byte(arg >> 24)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Result Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, "", err
+ }
+
+ // packet indicator
+ switch data[0] {
+
+ case iOK:
+ return nil, "", mc.handleOkPacket(data)
+
+ case iAuthMoreData:
+ return data[1:], "", err
+
+ case iEOF:
+ if len(data) == 1 {
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, "mysql_old_password", nil
+ }
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ if pluginEndIndex < 0 {
+ return nil, "", ErrMalformPkt
+ }
+ plugin := string(data[1:pluginEndIndex])
+ authData := data[pluginEndIndex+1:]
+ return authData, plugin, nil
+
+ default: // Error otherwise
+ return nil, "", mc.handleErrorPacket(data)
+ }
+}
+
+// Returns error if Packet is not an 'Result OK'-Packet
+func (mc *mysqlConn) readResultOK() error {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ if data[0] == iOK {
+ return mc.handleOkPacket(data)
+ }
+ return mc.handleErrorPacket(data)
+}
+
+// Result Set Header Packet
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
+func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
+ data, err := mc.readPacket()
+ if err == nil {
+ switch data[0] {
+
+ case iOK:
+ return 0, mc.handleOkPacket(data)
+
+ case iERR:
+ return 0, mc.handleErrorPacket(data)
+
+ case iLocalInFile:
+ return 0, mc.handleInFileRequest(string(data[1:]))
+ }
+
+ // column count
+ num, _, n := readLengthEncodedInteger(data)
+ if n-len(data) == 0 {
+ return int(num), nil
+ }
+
+ return 0, ErrMalformPkt
+ }
+ return 0, err
+}
+
+// Error Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
+func (mc *mysqlConn) handleErrorPacket(data []byte) error {
+ if data[0] != iERR {
+ return ErrMalformPkt
+ }
+
+ // 0xff [1 byte]
+
+ // Error Number [16 bit uint]
+ errno := binary.LittleEndian.Uint16(data[1:3])
+
+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // Oops; we are connected to a read-only connection, and won't be able
+ // to issue any write statements. Since RejectReadOnly is configured,
+ // we throw away this connection hoping this one would have write
+ // permission. This is specifically for a possible race condition
+ // during failover (e.g. on AWS Aurora). See README.md for more.
+ //
+ // We explicitly close the connection before returning
+ // driver.ErrBadConn to ensure that `database/sql` purges this
+ // connection and initiates a new one for next statement next time.
+ mc.Close()
+ return driver.ErrBadConn
+ }
+
+ me := &MySQLError{Number: errno}
+
+ pos := 3
+
+ // SQL State [optional: # + 5bytes string]
+ if data[3] == 0x23 {
+ copy(me.SQLState[:], data[4:4+5])
+ pos = 9
+ }
+
+ // Error Message [string]
+ me.Message = string(data[pos:])
+
+ return me
+}
+
+func readStatus(b []byte) statusFlag {
+ return statusFlag(b[0]) | statusFlag(b[1])<<8
+}
+
+// Ok Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
+func (mc *mysqlConn) handleOkPacket(data []byte) error {
+ var n, m int
+
+ // 0x00 [1 byte]
+
+ // Affected rows [Length Coded Binary]
+ mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+
+ // Insert id [Length Coded Binary]
+ mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // server_status [2 bytes]
+ mc.status = readStatus(data[1+n+m : 1+n+m+2])
+ if mc.status&statusMoreResultsExists != 0 {
+ return nil
+ }
+
+ // warning count [2 bytes]
+
+ return nil
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
+func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
+ columns := make([]mysqlField, count)
+
+ for i := 0; ; i++ {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
+ if i == count {
+ return columns, nil
+ }
+ return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
+ }
+
+ // Catalog
+ pos, err := skipLengthEncodedString(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Database [len coded string]
+ n, err := skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Table [len coded string]
+ if mc.cfg.ColumnsWithAlias {
+ tableName, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ columns[i].tableName = string(tableName)
+ } else {
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ }
+
+ // Original table [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Name [len coded string]
+ name, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ columns[i].name = string(name)
+ pos += n
+
+ // Original name [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Filler [uint8]
+ pos++
+
+ // Charset [charset, collation uint8]
+ columns[i].charSet = data[pos]
+ pos += 2
+
+ // Length [uint32]
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
+
+ // Field type [uint8]
+ columns[i].fieldType = fieldType(data[pos])
+ pos++
+
+ // Flags [uint16]
+ columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ pos += 2
+
+ // Decimals [uint8]
+ columns[i].decimals = data[pos]
+ //pos++
+
+ // Default value [len coded binary]
+ //if pos < len(data) {
+ // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
+ //}
+ }
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
+func (rows *textRows) readRow(dest []driver.Value) error {
+ mc := rows.mc
+
+ if rows.rs.done {
+ return io.EOF
+ }
+
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ // server_status [2 bytes]
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ if data[0] == iERR {
+ rows.mc = nil
+ return mc.handleErrorPacket(data)
+ }
+
+ // RowSet Packet
+ var (
+ n int
+ isNull bool
+ pos int = 0
+ )
+
+ for i := range dest {
+ // Read bytes and convert to string
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+
+ if err != nil {
+ return err
+ }
+
+ if isNull {
+ dest[i] = nil
+ continue
+ }
+
+ if !mc.parseTime {
+ continue
+ }
+
+ // Parse time field
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeTimestamp,
+ fieldTypeDateTime,
+ fieldTypeDate,
+ fieldTypeNewDate:
+ if dest[i], err = parseDateTime(dest[i].([]byte), mc.cfg.Loc); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read
+func (mc *mysqlConn) readUntilEOF() error {
+ for {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ switch data[0] {
+ case iERR:
+ return mc.handleErrorPacket(data)
+ case iEOF:
+ if len(data) == 5 {
+ mc.status = readStatus(data[3:])
+ }
+ return nil
+ }
+ }
+}
+
+/******************************************************************************
+* Prepared Statements *
+******************************************************************************/
+
+// Prepare Result Packets
+// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
+func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
+ data, err := stmt.mc.readPacket()
+ if err == nil {
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ return 0, stmt.mc.handleErrorPacket(data)
+ }
+
+ // statement id [4 bytes]
+ stmt.id = binary.LittleEndian.Uint32(data[1:5])
+
+ // Column count [16 bit uint]
+ columnCount := binary.LittleEndian.Uint16(data[5:7])
+
+ // Param count [16 bit uint]
+ stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
+
+ // Reserved [8 bit]
+
+ // Warning count [16 bit uint]
+
+ return columnCount, nil
+ }
+ return 0, err
+}
+
+// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
+func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
+ maxLen := stmt.mc.maxAllowedPacket - 1
+ pktLen := maxLen
+
+ // After the header (bytes 0-3) follows before the data:
+ // 1 byte command
+ // 4 bytes stmtID
+ // 2 bytes paramID
+ const dataOffset = 1 + 4 + 2
+
+ // Cannot use the write buffer since
+ // a) the buffer is too small
+ // b) it is in use
+ data := make([]byte, 4+1+4+2+len(arg))
+
+ copy(data[4+dataOffset:], arg)
+
+ for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
+ if dataOffset+argLen < maxLen {
+ pktLen = dataOffset + argLen
+ }
+
+ stmt.mc.sequence = 0
+ // Add command byte [1 byte]
+ data[4] = comStmtSendLongData
+
+ // Add stmtID [32 bit]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // Add paramID [16 bit]
+ data[9] = byte(paramID)
+ data[10] = byte(paramID >> 8)
+
+ // Send CMD packet
+ err := stmt.mc.writePacket(data[:4+pktLen])
+ if err == nil {
+ data = data[pktLen-dataOffset:]
+ continue
+ }
+ return err
+
+ }
+
+ // Reset Packet Sequence
+ stmt.mc.sequence = 0
+ return nil
+}
+
+// Execute Prepared Statement
+// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
+func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
+ if len(args) != stmt.paramCount {
+ return fmt.Errorf(
+ "argument count mismatch (got: %d; has: %d)",
+ len(args),
+ stmt.paramCount,
+ )
+ }
+
+ const minPktLen = 4 + 1 + 4 + 1 + 4
+ mc := stmt.mc
+
+ // Determine threshold dynamically to avoid packet size shortage.
+ longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+ if longDataSize < 64 {
+ longDataSize = 64
+ }
+
+ // Reset packet-sequence
+ mc.sequence = 0
+
+ var data []byte
+ var err error
+
+ if len(args) == 0 {
+ data, err = mc.buf.takeBuffer(minPktLen)
+ } else {
+ data, err = mc.buf.takeCompleteBuffer()
+ // In this case the len(data) == cap(data) which is used to optimise the flow below.
+ }
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // command [1 byte]
+ data[4] = comStmtExecute
+
+ // statement_id [4 bytes]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
+ data[9] = 0x00
+
+ // iteration_count (uint32(1)) [4 bytes]
+ data[10] = 0x01
+ data[11] = 0x00
+ data[12] = 0x00
+ data[13] = 0x00
+
+ if len(args) > 0 {
+ pos := minPktLen
+
+ var nullMask []byte
+ if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) {
+ // buffer has to be extended but we don't know by how much so
+ // we depend on append after all data with known sizes fit.
+ // We stop at that because we deal with a lot of columns here
+ // which makes the required allocation size hard to guess.
+ tmp := make([]byte, pos+maskLen+typesLen)
+ copy(tmp[:pos], data[:pos])
+ data = tmp
+ nullMask = data[pos : pos+maskLen]
+ // No need to clean nullMask as make ensures that.
+ pos += maskLen
+ } else {
+ nullMask = data[pos : pos+maskLen]
+ for i := range nullMask {
+ nullMask[i] = 0
+ }
+ pos += maskLen
+ }
+
+ // newParameterBoundFlag 1 [1 byte]
+ data[pos] = 0x01
+ pos++
+
+ // type of each parameter [len(args)*2 bytes]
+ paramTypes := data[pos:]
+ pos += len(args) * 2
+
+ // value of each parameter [n bytes]
+ paramValues := data[pos:pos]
+ valuesCap := cap(paramValues)
+
+ for i, arg := range args {
+ // build NULL-bitmap
+ if arg == nil {
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+ continue
+ }
+
+ if v, ok := arg.(json.RawMessage); ok {
+ arg = []byte(v)
+ }
+ // cache types and values
+ switch v := arg.(type) {
+ case int64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case uint64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x80 // type is unsigned
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case float64:
+ paramTypes[i+i] = byte(fieldTypeDouble)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ math.Float64bits(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(math.Float64bits(v))...,
+ )
+ }
+
+ case bool:
+ paramTypes[i+i] = byte(fieldTypeTiny)
+ paramTypes[i+i+1] = 0x00
+
+ if v {
+ paramValues = append(paramValues, 0x01)
+ } else {
+ paramValues = append(paramValues, 0x00)
+ }
+
+ case []byte:
+ // Common case (non-nil value) first
+ if v != nil {
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, v); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+
+ // Handle []byte(nil) as a NULL value
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+
+ case string:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
+ return err
+ }
+ }
+
+ case time.Time:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ var a [64]byte
+ var b = a[:0]
+
+ if v.IsZero() {
+ b = append(b, "0000-00-00"...)
+ } else {
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ if err != nil {
+ return err
+ }
+ }
+
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(b)),
+ )
+ paramValues = append(paramValues, b...)
+
+ default:
+ return fmt.Errorf("cannot convert type: %T", arg)
+ }
+ }
+
+ // Check if param values exceeded the available buffer
+ // In that case we must build the data packet with the new values buffer
+ if valuesCap != cap(paramValues) {
+ data = append(data[:pos], paramValues...)
+ if err = mc.buf.store(data); err != nil {
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+ }
+
+ pos += len(paramValues)
+ data = data[:pos]
+ }
+
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) discardResults() error {
+ for mc.status&statusMoreResultsExists != 0 {
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
+func (rows *binaryRows) readRow(dest []driver.Value) error {
+ data, err := rows.mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ mc := rows.mc
+ rows.mc = nil
+
+ // Error otherwise
+ return mc.handleErrorPacket(data)
+ }
+
+ // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
+ pos := 1 + (len(dest)+7+2)>>3
+ nullMask := data[1:pos]
+
+ for i := range dest {
+ // Field is NULL
+ // (byte >> bit-pos) % 2 == 1
+ if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
+ dest[i] = nil
+ continue
+ }
+
+ // Convert to byte-coded string
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeNULL:
+ dest[i] = nil
+ continue
+
+ // Numeric Types
+ case fieldTypeTiny:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(data[pos])
+ } else {
+ dest[i] = int64(int8(data[pos]))
+ }
+ pos++
+ continue
+
+ case fieldTypeShort, fieldTypeYear:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ } else {
+ dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
+ }
+ pos += 2
+ continue
+
+ case fieldTypeInt24, fieldTypeLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ } else {
+ dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ }
+ pos += 4
+ continue
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ val := binary.LittleEndian.Uint64(data[pos : pos+8])
+ if val > math.MaxInt64 {
+ dest[i] = uint64ToString(val)
+ } else {
+ dest[i] = int64(val)
+ }
+ } else {
+ dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ }
+ pos += 8
+ continue
+
+ case fieldTypeFloat:
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ pos += 4
+ continue
+
+ case fieldTypeDouble:
+ dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ continue
+
+ // Length coded Binary Strings
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+ var isNull bool
+ var n int
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ continue
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err
+
+ case
+ fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
+ fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
+ fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
+
+ num, isNull, n := readLengthEncodedInteger(data[pos:])
+ pos += n
+
+ switch {
+ case isNull:
+ dest[i] = nil
+ continue
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
+ // database/sql does not support an equivalent to TIME, return a string
+ var dstlen uint8
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 8
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 8 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
+ case rows.mc.parseTime:
+ dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
+ default:
+ var dstlen uint8
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
+ dstlen = 10
+ } else {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 19
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 19 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
+ }
+
+ if err == nil {
+ pos += int(num)
+ continue
+ } else {
+ return err
+ }
+
+ // Please report if this happens!
+ default:
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
new file mode 100644
index 00000000..c6438d03
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -0,0 +1,22 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlResult struct {
+ affectedRows int64
+ insertId int64
+}
+
+func (res *mysqlResult) LastInsertId() (int64, error) {
+ return res.insertId, nil
+}
+
+func (res *mysqlResult) RowsAffected() (int64, error) {
+ return res.affectedRows, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
new file mode 100644
index 00000000..888bdb5f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -0,0 +1,223 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "io"
+ "math"
+ "reflect"
+)
+
+type resultSet struct {
+ columns []mysqlField
+ columnNames []string
+ done bool
+}
+
+type mysqlRows struct {
+ mc *mysqlConn
+ rs resultSet
+ finish func()
+}
+
+type binaryRows struct {
+ mysqlRows
+}
+
+type textRows struct {
+ mysqlRows
+}
+
+func (rows *mysqlRows) Columns() []string {
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
+ columns := make([]string, len(rows.rs.columns))
+ if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+ for i := range columns {
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
+ } else {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+ } else {
+ for i := range columns {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+
+ rows.rs.columnNames = columns
+ return columns
+}
+
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ return rows.rs.columns[i].typeDatabaseName()
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
+
+func (rows *mysqlRows) Close() (err error) {
+ if f := rows.finish; f != nil {
+ f()
+ rows.finish = nil
+ }
+
+ mc := rows.mc
+ if mc == nil {
+ return nil
+ }
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // flip the buffer for this connection if we need to drain it.
+ // note that for a successful query (i.e. one where rows.next()
+ // has been called until it returns false), `rows.mc` will be nil
+ // by the time the user calls `(*Rows).Close`, so we won't reach this
+ // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
+ mc.buf.flip()
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ err = mc.readUntilEOF()
+ }
+ if err == nil {
+ if err = mc.discardResults(); err != nil {
+ return err
+ }
+ }
+
+ rows.mc = nil
+ return err
+}
+
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) {
+ if rows.mc == nil {
+ return 0, io.EOF
+ }
+ if err := rows.mc.error(); err != nil {
+ return 0, err
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return 0, err
+ }
+ rows.rs.done = true
+ }
+
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ return 0, io.EOF
+ }
+ rows.rs = resultSet{}
+ return rows.mc.readResultSetHeaderPacket()
+}
+
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() error {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *binaryRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
+
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *textRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
new file mode 100644
index 00000000..10ece8bd
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -0,0 +1,220 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+)
+
+type mysqlStmt struct {
+ mc *mysqlConn
+ id uint32
+ paramCount int
+}
+
+func (stmt *mysqlStmt) Close() error {
+ if stmt.mc == nil || stmt.mc.closed.Load() {
+ // driver.Stmt.Close can be called more than once, thus this function
+ // has to be idempotent.
+ // See also Issue #450 and golang/go#16019.
+ //errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+ stmt.mc = nil
+ return err
+}
+
+func (stmt *mysqlStmt) NumInput() int {
+ return stmt.paramCount
+}
+
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+ return converter{}
+}
+
+func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+ if stmt.mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
+}
+
+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ rows := new(binaryRows)
+
+ if resLen > 0 {
+ rows.mc = mc
+ rows.rs.columns, err = mc.readColumns(resLen)
+ } else {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ return rows, err
+}
+
+var jsonType = reflect.TypeOf(json.RawMessage{})
+
+type converter struct{}
+
+// ConvertValue mirrors the reference/default converter in database/sql/driver
+// with _one_ exception. We support uint64 with their high bit and the default
+// implementation does not. This function should be kept in sync with
+// database/sql/driver defaultConverter.ConvertValue() except for that
+// deliberate difference.
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+ if driver.IsValue(v) {
+ return v, nil
+ }
+
+ if vr, ok := v.(driver.Valuer); ok {
+ sv, err := callValuerValue(vr)
+ if err != nil {
+ return nil, err
+ }
+ if driver.IsValue(sv) {
+ return sv, nil
+ }
+ // A value returned from the Valuer interface can be "a type handled by
+ // a database driver's NamedValueChecker interface" so we should accept
+ // uint64 here as well.
+ if u, ok := sv.(uint64); ok {
+ return u, nil
+ }
+ return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+ }
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Ptr:
+ // indirect pointers
+ if rv.IsNil() {
+ return nil, nil
+ } else {
+ return c.ConvertValue(rv.Elem().Interface())
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rv.Uint(), nil
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), nil
+ case reflect.Bool:
+ return rv.Bool(), nil
+ case reflect.Slice:
+ switch t := rv.Type(); {
+ case t == jsonType:
+ return v, nil
+ case t.Elem().Kind() == reflect.Uint8:
+ return rv.Bytes(), nil
+ default:
+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, t.Elem().Kind())
+ }
+ case reflect.String:
+ return rv.String(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
+
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This is an exact copy of the same-named unexported function from the
+// database/sql package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+ rv.IsNil() &&
+ rv.Type().Elem().Implements(valuerReflectType) {
+ return nil, nil
+ }
+ return vr.Value()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
new file mode 100644
index 00000000..4a4b6100
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -0,0 +1,31 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlTx struct {
+ mc *mysqlConn
+}
+
+func (tx *mysqlTx) Commit() (err error) {
+ if tx.mc == nil || tx.mc.closed.Load() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("COMMIT")
+ tx.mc = nil
+ return
+}
+
+func (tx *mysqlTx) Rollback() (err error) {
+ if tx.mc == nil || tx.mc.closed.Load() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("ROLLBACK")
+ tx.mc = nil
+ return
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
new file mode 100644
index 00000000..15dbd8d1
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -0,0 +1,834 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Registry for custom tls.Configs
+var (
+ tlsConfigLock sync.RWMutex
+ tlsConfigRegistry map[string]*tls.Config
+)
+
+// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
+// Use the key as a value in the DSN where tls=value.
+//
+// Note: The provided tls.Config is exclusively owned by the driver after
+// registering it.
+//
+// rootCertPool := x509.NewCertPool()
+// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+// log.Fatal("Failed to append PEM.")
+// }
+// clientCert := make([]tls.Certificate, 0, 1)
+// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// clientCert = append(clientCert, certs)
+// mysql.RegisterTLSConfig("custom", &tls.Config{
+// RootCAs: rootCertPool,
+// Certificates: clientCert,
+// })
+// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
+func RegisterTLSConfig(key string, config *tls.Config) error {
+ if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
+ return fmt.Errorf("key '%s' is reserved", key)
+ }
+
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry == nil {
+ tlsConfigRegistry = make(map[string]*tls.Config)
+ }
+
+ tlsConfigRegistry[key] = config
+ tlsConfigLock.Unlock()
+ return nil
+}
+
+// DeregisterTLSConfig removes the tls.Config associated with key.
+func DeregisterTLSConfig(key string) {
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry != nil {
+ delete(tlsConfigRegistry, key)
+ }
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegistry[key]; ok {
+ config = v.Clone()
+ }
+ tlsConfigLock.RUnlock()
+ return
+}
+
+// Returns the bool value of the input.
+// The 2nd return value indicates if the input was a valid bool value
+func readBool(input string) (value bool, valid bool) {
+ switch input {
+ case "1", "true", "TRUE", "True":
+ return true, true
+ case "0", "false", "FALSE", "False":
+ return false, true
+ }
+
+ // Not a valid bool value
+ return
+}
+
+/******************************************************************************
+* Time related utils *
+******************************************************************************/
+
+func parseDateTime(b []byte, loc *time.Location) (time.Time, error) {
+ const base = "0000-00-00 00:00:00.000000"
+ switch len(b) {
+ case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+ if string(b) == base[:len(b)] {
+ return time.Time{}, nil
+ }
+
+ year, err := parseByteYear(b)
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[4] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[4])
+ }
+
+ m, err := parseByte2Digits(b[5], b[6])
+ if err != nil {
+ return time.Time{}, err
+ }
+ month := time.Month(m)
+
+ if b[7] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[7])
+ }
+
+ day, err := parseByte2Digits(b[8], b[9])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if len(b) == 10 {
+ return time.Date(year, month, day, 0, 0, 0, 0, loc), nil
+ }
+
+ if b[10] != ' ' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[10])
+ }
+
+ hour, err := parseByte2Digits(b[11], b[12])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[13] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[13])
+ }
+
+ min, err := parseByte2Digits(b[14], b[15])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[16] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[16])
+ }
+
+ sec, err := parseByte2Digits(b[17], b[18])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if len(b) == 19 {
+ return time.Date(year, month, day, hour, min, sec, 0, loc), nil
+ }
+
+ if b[19] != '.' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[19])
+ }
+ nsec, err := parseByteNanoSec(b[20:])
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Date(year, month, day, hour, min, sec, nsec, loc), nil
+ default:
+ return time.Time{}, fmt.Errorf("invalid time bytes: %s", b)
+ }
+}
+
+func parseByteYear(b []byte) (int, error) {
+ year, n := 0, 1000
+ for i := 0; i < 4; i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ year += v * n
+ n /= 10
+ }
+ return year, nil
+}
+
+func parseByte2Digits(b1, b2 byte) (int, error) {
+ d1, err := bToi(b1)
+ if err != nil {
+ return 0, err
+ }
+ d2, err := bToi(b2)
+ if err != nil {
+ return 0, err
+ }
+ return d1*10 + d2, nil
+}
+
+func parseByteNanoSec(b []byte) (int, error) {
+ ns, digit := 0, 100000 // max is 6-digits
+ for i := 0; i < len(b); i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ ns += v * digit
+ digit /= 10
+ }
+ // nanoseconds has 10-digits. (needs to scale digits)
+ // 10 - 6 = 4, so we have to multiple 1000.
+ return ns * 1000, nil
+}
+
+func bToi(b byte) (int, error) {
+ if b < '0' || b > '9' {
+ return 0, errors.New("not [0-9]")
+ }
+ return int(b - '0'), nil
+}
+
+func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
+ switch num {
+ case 0:
+ return time.Time{}, nil
+ case 4:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ 0, 0, 0, 0,
+ loc,
+ ), nil
+ case 7:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ 0,
+ loc,
+ ), nil
+ case 11:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
+ loc,
+ ), nil
+ }
+ return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
+}
+
+func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+ year, month, day := t.Date()
+ hour, min, sec := t.Clock()
+ nsec := t.Nanosecond()
+
+ if year < 1 || year > 9999 {
+ return buf, errors.New("year is not in the range [1, 9999]: " + strconv.Itoa(year)) // use errors.New instead of fmt.Errorf to avoid year escape to heap
+ }
+ year100 := year / 100
+ year1 := year % 100
+
+ var localBuf [len("2006-01-02T15:04:05.999999999")]byte // does not escape
+ localBuf[0], localBuf[1], localBuf[2], localBuf[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1]
+ localBuf[4] = '-'
+ localBuf[5], localBuf[6] = digits10[month], digits01[month]
+ localBuf[7] = '-'
+ localBuf[8], localBuf[9] = digits10[day], digits01[day]
+
+ if hour == 0 && min == 0 && sec == 0 && nsec == 0 {
+ return append(buf, localBuf[:10]...), nil
+ }
+
+ localBuf[10] = ' '
+ localBuf[11], localBuf[12] = digits10[hour], digits01[hour]
+ localBuf[13] = ':'
+ localBuf[14], localBuf[15] = digits10[min], digits01[min]
+ localBuf[16] = ':'
+ localBuf[17], localBuf[18] = digits10[sec], digits01[sec]
+
+ if nsec == 0 {
+ return append(buf, localBuf[:19]...), nil
+ }
+ nsec100000000 := nsec / 100000000
+ nsec1000000 := (nsec / 1000000) % 100
+ nsec10000 := (nsec / 10000) % 100
+ nsec100 := (nsec / 100) % 100
+ nsec1 := nsec % 100
+ localBuf[19] = '.'
+
+ // milli second
+ localBuf[20], localBuf[21], localBuf[22] =
+ digits01[nsec100000000], digits10[nsec1000000], digits01[nsec1000000]
+ // micro second
+ localBuf[23], localBuf[24], localBuf[25] =
+ digits10[nsec10000], digits01[nsec10000], digits10[nsec100]
+ // nano second
+ localBuf[26], localBuf[27], localBuf[28] =
+ digits01[nsec100], digits10[nsec1], digits01[nsec1]
+
+ // trim trailing zeros
+ n := len(localBuf)
+ for n > 0 && localBuf[n-1] == '0' {
+ n--
+ }
+
+ return append(buf, localBuf[:n]...), nil
+}
+
+// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
+// if the DATE or DATETIME has the zero value.
+// It must never be changed.
+// The current behavior depends on database/sql copying the result.
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
+
+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+
+func appendMicrosecs(dst, src []byte, decimals int) []byte {
+ if decimals <= 0 {
+ return dst
+ }
+ if len(src) == 0 {
+ return append(dst, ".000000"[:decimals+1]...)
+ }
+
+ microsecs := binary.LittleEndian.Uint32(src[:4])
+ p1 := byte(microsecs / 10000)
+ microsecs -= 10000 * uint32(p1)
+ p2 := byte(microsecs / 100)
+ microsecs -= 100 * uint32(p2)
+ p3 := byte(microsecs)
+
+ switch decimals {
+ default:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3], digits01[p3],
+ )
+ case 1:
+ return append(dst, '.',
+ digits10[p1],
+ )
+ case 2:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ )
+ case 3:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2],
+ )
+ case 4:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ )
+ case 5:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3],
+ )
+ }
+}
+
+// formatBinaryDateTime renders a binary-protocol DATE/DATETIME payload as
+// its textual form ("YYYY-MM-DD" or "YYYY-MM-DD HH:MM:SS[.ffffff]").
+// length is the expected textual length: 10 for DATE, 19 for DATETIME,
+// 21-26 for DATETIME with 1-6 fractional digits. An empty src is the
+// zero value and returns a prefix of zeroDateTime without allocating.
+func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
+	// length expects the deterministic length of the zero value,
+	// negative time and 100+ hours are automatically added if needed
+	if len(src) == 0 {
+		return zeroDateTime[:length], nil
+	}
+	var dst []byte // return value
+	var p1, p2, p3 byte // current digit pair
+
+	// Validate the declared output length (20 is impossible: it would be a
+	// bare '.' with no fractional digits).
+	switch length {
+	case 10, 19, 21, 22, 23, 24, 25, 26:
+	default:
+		t := "DATE"
+		if length > 10 {
+			t += "TIME"
+		}
+		return nil, fmt.Errorf("illegal %s length %d", t, length)
+	}
+	// Validate the wire length: 4 = date only, 7 = date+time,
+	// 11 = date+time+microseconds.
+	switch len(src) {
+	case 4, 7, 11:
+	default:
+		t := "DATE"
+		if length > 10 {
+			t += "TIME"
+		}
+		return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+	}
+	dst = make([]byte, 0, length)
+	// start with the date
+	year := binary.LittleEndian.Uint16(src[:2])
+	pt := year / 100
+	p1 = byte(year - 100*uint16(pt))
+	p2, p3 = src[2], src[3]
+	dst = append(dst,
+		digits10[pt], digits01[pt],
+		digits10[p1], digits01[p1], '-',
+		digits10[p2], digits01[p2], '-',
+		digits10[p3], digits01[p3],
+	)
+	if length == 10 {
+		return dst, nil
+	}
+	if len(src) == 4 {
+		// DATETIME requested but only a date was sent: pad with zero time.
+		return append(dst, zeroDateTime[10:length]...), nil
+	}
+	dst = append(dst, ' ')
+	p1 = src[4] // hour
+	src = src[5:]
+
+	// p1 is 2-digit hour, src is after hour
+	p2, p3 = src[0], src[1]
+	dst = append(dst,
+		digits10[p1], digits01[p1], ':',
+		digits10[p2], digits01[p2], ':',
+		digits10[p3], digits01[p3],
+	)
+	return appendMicrosecs(dst, src[2:], int(length)-20), nil
+}
+
+// formatBinaryTime renders a binary-protocol TIME payload as its textual
+// form ("[-]HHH:MM:SS[.ffffff]"). Wire layout: src[0] is the sign byte
+// (1 = negative), src[1:5] is a little-endian day count, src[5:8] are
+// hour/minute/second, and src[8:12] (when present) are microseconds.
+// An empty src is the zero value and returns a slice of zeroDateTime.
+func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
+	// length expects the deterministic length of the zero value,
+	// negative time and 100+ hours are automatically added if needed
+	if len(src) == 0 {
+		return zeroDateTime[11 : 11+length], nil
+	}
+	var dst []byte // return value
+
+	switch length {
+	case
+		8, // time (can be up to 10 when negative and 100+ hours)
+		10, 11, 12, 13, 14, 15: // time with fractional seconds
+	default:
+		return nil, fmt.Errorf("illegal TIME length %d", length)
+	}
+	// 8 = time without fractional seconds, 12 = with microseconds.
+	switch len(src) {
+	case 8, 12:
+	default:
+		return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+	}
+	// +2 to enable negative time and 100+ hours
+	dst = make([]byte, 0, length+2)
+	if src[0] == 1 {
+		dst = append(dst, '-')
+	}
+	days := binary.LittleEndian.Uint32(src[1:5])
+	hours := int64(days)*24 + int64(src[5])
+
+	if hours >= 100 {
+		// Three or more hour digits: fall back to strconv.
+		dst = strconv.AppendInt(dst, hours, 10)
+	} else {
+		dst = append(dst, digits10[hours], digits01[hours])
+	}
+
+	min, sec := src[6], src[7]
+	dst = append(dst, ':',
+		digits10[min], digits01[min], ':',
+		digits10[sec], digits01[sec],
+	)
+	return appendMicrosecs(dst, src[8:], int(length)-9), nil
+}
+
+/******************************************************************************
+* Convert from and to bytes *
+******************************************************************************/
+
+// uint64ToBytes encodes n as 8 bytes in little-endian order
+// (least-significant byte first).
+func uint64ToBytes(n uint64) []byte {
+	return []byte{
+		byte(n),
+		byte(n >> 8),
+		byte(n >> 16),
+		byte(n >> 24),
+		byte(n >> 32),
+		byte(n >> 40),
+		byte(n >> 48),
+		byte(n >> 56),
+	}
+}
+
+// uint64ToString formats n as its decimal ASCII representation, building
+// the digits right-to-left in a fixed 20-byte array (the maximum width of
+// a uint64) and returning the used suffix.
+func uint64ToString(n uint64) []byte {
+	var a [20]byte
+	i := 20
+
+	// U+0030 = 0
+	// ...
+	// U+0039 = 9
+
+	var q uint64
+	for n >= 10 {
+		i--
+		q = n / 10
+		a[i] = uint8(n-q*10) + 0x30
+		n = q
+	}
+
+	// Final (most significant) digit.
+	i--
+	a[i] = uint8(n) + 0x30
+
+	return a[i:]
+}
+
+// treats string value as unsigned integer representation
+// Note: performs no validation — non-digit bytes silently corrupt the
+// result, and very long inputs can overflow int. Callers are expected to
+// pass well-formed ASCII digit strings.
+func stringToInt(b []byte) int {
+	val := 0
+	for i := range b {
+		val *= 10
+		val += int(b[i] - 0x30)
+	}
+	return val
+}
+
+// returns the string read as a bytes slice, whether the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+	// Get length
+	num, isNull, n := readLengthEncodedInteger(b)
+	if num < 1 {
+		// Zero-length (or NULL) string: empty slice positioned after the
+		// length prefix.
+		return b[n:n], isNull, n, nil
+	}
+
+	n += int(num)
+
+	// Check data length
+	if len(b) >= n {
+		// Three-index slice caps capacity at the string's end so a later
+		// append by the caller cannot stomp the rest of the packet buffer.
+		return b[n-int(num) : n : n], false, n, nil
+	}
+	return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+// Like readLengthEncodedString, but discards the string contents.
+func skipLengthEncodedString(b []byte) (int, error) {
+	// Get length
+	num, _, n := readLengthEncodedInteger(b)
+	if num < 1 {
+		return n, nil
+	}
+
+	n += int(num)
+
+	// Check data length
+	if len(b) >= n {
+		return n, nil
+	}
+	return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+// Decodes a MySQL length-encoded integer: the first byte selects the width
+// (0-250 inline, 0xfc/0xfd/0xfe for 2/3/8 following bytes, 0xfb for NULL).
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+	// See issue #349
+	if len(b) == 0 {
+		return 0, true, 1
+	}
+
+	switch b[0] {
+	// 251: NULL
+	case 0xfb:
+		return 0, true, 1
+
+	// 252: value of following 2
+	case 0xfc:
+		return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+	// 253: value of following 3
+	case 0xfd:
+		return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+	// 254: value of following 8
+	case 0xfe:
+		return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+			uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+			uint64(b[7])<<48 | uint64(b[8])<<56,
+			false, 9
+	}
+
+	// 0-250: value of first byte
+	return uint64(b[0]), false, 1
+}
+
+// encodes a uint64 value and appends it to the given bytes slice
+// Inverse of readLengthEncodedInteger: picks the shortest of the 1/3/4/9
+// byte encodings, emitting values little-endian after the marker byte.
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+	switch {
+	case n <= 250:
+		return append(b, byte(n))
+
+	case n <= 0xffff:
+		return append(b, 0xfc, byte(n), byte(n>>8))
+
+	case n <= 0xffffff:
+		return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+	}
+	return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+		byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
+
+// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, reallocate new buffer.
+// The returned slice has length newSize; the bytes between the old length
+// and the new length are uninitialized and the caller is expected to
+// overwrite them.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+	newSize := len(buf) + appendSize
+	if cap(buf) < newSize {
+		// Grow buffer exponentially
+		newBuf := make([]byte, len(buf)*2+appendSize)
+		copy(newBuf, buf)
+		buf = newBuf
+	}
+	return buf[:newSize]
+}
+
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+// The escaped output is appended to buf and the extended slice returned.
+func escapeBytesBackslash(buf, v []byte) []byte {
+	pos := len(buf)
+	// Reserve the worst case up front: every input byte doubled. The final
+	// re-slice to pos trims whatever was not used.
+	buf = reserveBuffer(buf, len(v)*2)
+
+	for _, c := range v {
+		switch c {
+		case '\x00':
+			buf[pos+1] = '0'
+			buf[pos] = '\\'
+			pos += 2
+		case '\n':
+			buf[pos+1] = 'n'
+			buf[pos] = '\\'
+			pos += 2
+		case '\r':
+			buf[pos+1] = 'r'
+			buf[pos] = '\\'
+			pos += 2
+		case '\x1a':
+			buf[pos+1] = 'Z'
+			buf[pos] = '\\'
+			pos += 2
+		case '\'':
+			buf[pos+1] = '\''
+			buf[pos] = '\\'
+			pos += 2
+		case '"':
+			buf[pos+1] = '"'
+			buf[pos] = '\\'
+			pos += 2
+		case '\\':
+			buf[pos+1] = '\\'
+			buf[pos] = '\\'
+			pos += 2
+		default:
+			buf[pos] = c
+			pos++
+		}
+	}
+
+	return buf[:pos]
+}
+
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+// Kept as a separate copy to avoid a string->[]byte conversion allocation.
+func escapeStringBackslash(buf []byte, v string) []byte {
+	pos := len(buf)
+	// Worst case: every input byte doubled.
+	buf = reserveBuffer(buf, len(v)*2)
+
+	for i := 0; i < len(v); i++ {
+		c := v[i]
+		switch c {
+		case '\x00':
+			buf[pos+1] = '0'
+			buf[pos] = '\\'
+			pos += 2
+		case '\n':
+			buf[pos+1] = 'n'
+			buf[pos] = '\\'
+			pos += 2
+		case '\r':
+			buf[pos+1] = 'r'
+			buf[pos] = '\\'
+			pos += 2
+		case '\x1a':
+			buf[pos+1] = 'Z'
+			buf[pos] = '\\'
+			pos += 2
+		case '\'':
+			buf[pos+1] = '\''
+			buf[pos] = '\\'
+			pos += 2
+		case '"':
+			buf[pos+1] = '"'
+			buf[pos] = '\\'
+			pos += 2
+		case '\\':
+			buf[pos+1] = '\\'
+			buf[pos] = '\\'
+			pos += 2
+		default:
+			buf[pos] = c
+			pos++
+		}
+	}
+
+	return buf[:pos]
+}
+
+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+// This escapes the contents of a string by doubling up any apostrophes that
+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+// effect on the server.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+// The escaped output is appended to buf and the extended slice returned.
+func escapeBytesQuotes(buf, v []byte) []byte {
+	pos := len(buf)
+	// Worst case: every byte is an apostrophe and doubles.
+	buf = reserveBuffer(buf, len(v)*2)
+
+	for _, c := range v {
+		if c == '\'' {
+			buf[pos+1] = '\''
+			buf[pos] = '\''
+			pos += 2
+		} else {
+			buf[pos] = c
+			pos++
+		}
+	}
+
+	return buf[:pos]
+}
+
+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+// Kept as a separate copy to avoid a string->[]byte conversion allocation.
+func escapeStringQuotes(buf []byte, v string) []byte {
+	pos := len(buf)
+	// Worst case: every byte is an apostrophe and doubles.
+	buf = reserveBuffer(buf, len(v)*2)
+
+	for i := 0; i < len(v); i++ {
+		c := v[i]
+		if c == '\'' {
+			buf[pos+1] = '\''
+			buf[pos] = '\''
+			pos += 2
+		} else {
+			buf[pos] = c
+			pos++
+		}
+	}
+
+	return buf[:pos]
+}
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+//
+// Embedding it makes `go vet -copylocks` flag any copy of the enclosing
+// struct; the methods themselves do nothing at runtime.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// Unlock is a no-op used by -copylocks checker from `go vet`.
+// noCopy should implement sync.Locker from Go 1.11
+// https://github.com/golang/go/commit/c2eba53e7f80df21d51285879d51ab81bcfbf6bc
+// https://github.com/golang/go/issues/26165
+func (*noCopy) Unlock() {}
+
+// atomicError is a wrapper for atomically accessed error values
+// backed by atomic.Value; the embedded noCopy makes `go vet` reject copies.
+type atomicError struct {
+	_     noCopy
+	value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+// (atomic.Value.Store panics on nil).
+func (ae *atomicError) Set(value error) {
+	ae.value.Store(value)
+}
+
+// Value returns the current error value
+// or nil if Set has never been called.
+func (ae *atomicError) Value() error {
+	if v := ae.value.Load(); v != nil {
+		// this will panic if the value doesn't implement the error interface
+		return v.(error)
+	}
+	return nil
+}
+
+// namedValueToValue converts driver.NamedValue arguments to plain
+// driver.Value arguments, rejecting any parameter that carries a name
+// since the driver does not support named parameters.
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+	dargs := make([]driver.Value, len(named))
+	for n, param := range named {
+		if len(param.Name) > 0 {
+			// TODO: support the use of Named Parameters #561
+			return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+		}
+		dargs[n] = param.Value
+	}
+	return dargs, nil
+}
+
+// mapIsolationLevel translates a database/sql isolation level into the
+// corresponding SQL keyword string for SET TRANSACTION ISOLATION LEVEL,
+// returning an error for levels MySQL does not support.
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+	switch sql.IsolationLevel(level) {
+	case sql.LevelRepeatableRead:
+		return "REPEATABLE READ", nil
+	case sql.LevelReadCommitted:
+		return "READ COMMITTED", nil
+	case sql.LevelReadUncommitted:
+		return "READ UNCOMMITTED", nil
+	case sql.LevelSerializable:
+		return "SERIALIZABLE", nil
+	default:
+		return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
+	}
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7961be45..615f1d19 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -64,6 +64,9 @@ github.com/go-openapi/jsonreference
# github.com/go-openapi/swag v0.19.14
## explicit; go 1.11
github.com/go-openapi/swag
+# github.com/go-sql-driver/mysql v1.7.1
+## explicit; go 1.13
+github.com/go-sql-driver/mysql
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
github.com/gogo/protobuf/proto