Skip to content

Commit

Permalink
Change ingress to ClickHouse server with tls enabled
Browse files Browse the repository at this point in the history
Signed-off-by: Yun-Tang Hsu <[email protected]>
  • Loading branch information
yuntanghsu committed Jul 18, 2023
1 parent 861e52b commit c24a395
Show file tree
Hide file tree
Showing 11 changed files with 100 additions and 146 deletions.
6 changes: 1 addition & 5 deletions ci/kind/test-e2e-kind.sh
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,6 @@ THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
TESTBED_CMD="$THIS_DIR/kind-setup.sh"
YML_CMD="$THIS_DIR/../../hack/generate-manifest.sh"
FLOWAGGREGATOR_YML_CMD="$THIS_DIR/../../hack/generate-manifest-flow-aggregator.sh"
CLICKHOUSE_TLS_YML_CMD="$THIS_DIR/../../hack/generate-manifest-clickhouse-tls.sh"
INGRESS_NGINX_CONTROLLER="$THIS_DIR/../../build/yamls/ingress-nginx-controller.yml"
FLOW_VISIBILITY_HELM_VALUES="$THIS_DIR/values-flow-exporter.yml"
CH_OPERATOR_YML="$THIS_DIR/../../build/yamls/clickhouse-operator-install-bundle.yml"
FLOW_VISIBILITY_CHART="$THIS_DIR/../../test/e2e/charts/flow-visibility"
Expand Down Expand Up @@ -258,15 +256,13 @@ function run_test {
$FLOWAGGREGATOR_YML_CMD | docker exec -i kind-control-plane dd of=/root/flow-aggregator.yml
fi
$HELM template "$FLOW_VISIBILITY_CHART" | docker exec -i kind-control-plane dd of=/root/flow-visibility.yml
$HELM template "$FLOW_VISIBILITY_CHART" --set "ingress.enable=true" | docker exec -i kind-control-plane dd of=/root/flow-visibility-tls.yml
$HELM template "$FLOW_VISIBILITY_CHART" --set "secureConnection.enable=true" | docker exec -i kind-control-plane dd of=/root/flow-visibility-tls.yml

curl -o $CH_OPERATOR_YML https://raw.githubusercontent.com/Altinity/clickhouse-operator/release-0.21.0/deploy/operator/clickhouse-operator-install-bundle.yaml
curl -o $INGRESS_NGINX_CONTROLLER https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/cloud/deploy.yaml
sed -i -e "s|\"image\": \"clickhouse/clickhouse-server:22.3\"|\"image\": \"projects.registry.vmware.com/antrea/clickhouse-server:23.4\"|g" $CH_OPERATOR_YML
sed -i -e "s|image: altinity/clickhouse-operator:0.21.0|image: projects.registry.vmware.com/antrea/clickhouse-operator:0.21.0|g" $CH_OPERATOR_YML
sed -i -e "s|image: altinity/metrics-exporter:0.21.0|image: projects.registry.vmware.com/antrea/metrics-exporter:0.21.0|g" $CH_OPERATOR_YML
cat $CH_OPERATOR_YML | docker exec -i kind-control-plane dd of=/root/clickhouse-operator-install-bundle.yml
cat $INGRESS_NGINX_CONTROLLER | docker exec -i kind-control-plane dd of=/root/ingress-nginx-controller.yml
fi

if $proxy_all; then
Expand Down
13 changes: 13 additions & 0 deletions test/e2e/charts/flow-visibility/provisioning/tls/dhparam.pem
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
-----BEGIN DH PARAMETERS-----
MIICCAKCAgEA2kXNnoHdFVUbwHJb17JTNNrNdUqQdbq/gZvEtPsNNoqVc8zLfrKC
yV0qwOd24+FCn4h+5RE+pDgikH4AIzQmfDI3Nmc5DpARUdmTJau5dhCT+J0zc+FF
smKuiN0ApHabnB+WN/EHJy0e20oiVgYefYCXjZWogK9FG6YNFDhhm+n3RAq/hclI
TKcLAW/8tvvJjwVi3dSWPXFtIgwY/DUhct+VbFer0YYCb2k8MBs2Pjl51Pz0u8a9
T/Y7JKYA5GNlQSFYpmnxb/cR4BOES5ZMH7nBAsjiSn9UUo9DbM5gf8zIesuYnnw9
wY+OvrjK0o1nEXGycWBIfpAg89i77asWCqq139XN6JcG4nCJWGusrp+6Pw3Bwwah
UiCrYZVldSIeEsy3ueqVouO7fhLrjbf6nDDBcanSwYniKvRsrxIfBZ9ho3d0PzS/
kEAe9CPW9SD2P/4LGNDy46YRg9hMFWdJ+1N5Rr9PosAWKvJ0Y2zkk6ihOH4mr9e3
f2rJISke3z61GAUEB9PokNSbmHIrO9NLnifBhMXoFmdRWZIWJFz9MI5lEHEnt2sz
ig9vSlva+K4o2GxgIKekLE+DjBL6SjBKPuQxSyw4O6SIOyMEv7mPJLAj/lJ+4KQM
jQDvgtdWzrZSAVMO+tmT61Jjjs7XvL1U7cmnc9Llxusxlt2bdAxmafsCAQI=
-----END DH PARAMETERS-----
16 changes: 16 additions & 0 deletions test/e2e/charts/flow-visibility/provisioning/tls/settings.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
<yandex>
<https_port>8443</https_port>
<tcp_port_secure>9440</tcp_port_secure>
<openSSL>
<server>
<certificateFile>/opt/certs/tls.crt</certificateFile>
<privateKeyFile>/opt/certs/tls.key</privateKeyFile>
<dhParamsFile>/etc/clickhouse-server/config.d/dhparam.pem</dhParamsFile>
<verificationMode>none</verificationMode>
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
</server>
</openSSL>
</yandex>
Original file line number Diff line number Diff line change
@@ -1,3 +1,12 @@
{{- define "clickhouse.tlsConfig" -}}
{{- $Files := .Files }}
{{- $Global := .Global }}
{{- range $path, $_ := .Files.Glob "provisioning/tls/*" }}
{{ regexReplaceAll "(.*)/" $path "" }}: |
{{ tpl ($.Files.Get $path) $Global | indent 2 }}
{{- end }}
{{- end -}}

apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
Expand All @@ -10,8 +19,22 @@ spec:
users:
clickhouse_operator/k8s_secret_password: flow-visibility/clickhouse-secret/password
clickhouse_operator/networks/ip: "::/0"
profiles:
readonly/readonly: 1
{{- if .Values.secureConnection.enable }}
files:
{{- include "clickhouse.tlsConfig" (dict "Files" .Files "Global" .) | indent 6 }}
{{- end }}
clusters:
- name: "clickhouse"
{{- if .Values.secureConnection.enable }}
secure: "yes"
settings:
tcp_port: 9000
tcp_port_secure: 9440
https_port: 8443
http_port: 8123
{{- end }}
layout:
shardsCount: 1
replicasCount: 1
Expand All @@ -29,6 +52,12 @@ spec:
port: 8123
- name: tcp
port: 9000
{{- if .Values.secureConnection.enable }}
- name: https
port: 8443
- name: secureclient
port: 9440
{{- end }}
podTemplates:
- name: pod-template
spec:
Expand All @@ -41,6 +70,14 @@ spec:
mountPath: /docker-entrypoint-initdb.d
- name: clickhouse-storage-volume
mountPath: /var/lib/clickhouse
{{- if .Values.secureConnection.enable }}
- name: clickhouse-tls
mountPath: /opt/certs/tls.crt
subPath: tls.crt
- name: clickhouse-tls
mountPath: /opt/certs/tls.key
subPath: tls.key
{{- end }}
volumes:
- name: clickhouse-configmap-volume
configMap:
Expand All @@ -49,3 +86,9 @@ spec:
emptyDir:
medium: Memory
sizeLimit: 8Gi
{{- if .Values.secureConnection.enable }}
- name: clickhouse-tls
secret:
secretName: clickhouse-tls
optional: true
{{- end }}
27 changes: 0 additions & 27 deletions test/e2e/charts/flow-visibility/templates/ingress.yaml

This file was deleted.

2 changes: 1 addition & 1 deletion test/e2e/charts/flow-visibility/templates/namespace.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
{{- if .Values.ingress.enable }}
{{- if .Values.secureConnection.enable }}
apiVersion: v1
kind: Namespace
metadata:
Expand Down
4 changes: 2 additions & 2 deletions test/e2e/charts/flow-visibility/templates/secret.yaml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
{{- if .Values.ingress.enable }}
{{- $cert := genSelfSignedCert .Values.ingress.commonName .Values.ingress.ipAddresses (uniq (append .Values.ingress.dnsNames .Values.ingress.commonName)) (.Values.ingress.daysValid | int) }}
{{- if .Values.secureConnection.enable }}
{{- $cert := genSelfSignedCert .Values.secureConnection.commonName .Values.secureConnection.ipAddresses (uniq (append .Values.secureConnection.dnsNames .Values.secureConnection.commonName)) (.Values.secureConnection.daysValid | int) }}
{{- $certPEM := $cert.Cert | b64enc }}
{{- $keyPEM := $cert.Key | b64enc }}
---
Expand Down
6 changes: 3 additions & 3 deletions test/e2e/charts/flow-visibility/values.yaml
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
# Settings for ingress
ingress:
# Settings for ClickHouse
secureConnection:
enable: false
# -- Common name to use in the certificate.
commonName: "clickhouse.localdev.me"
commonName: "clickhouse-clickhouse.flow-visibility.svc"
# -- IP addresses to use in the certificate.
ipAddresses: []
# -- DNS names to use in the certificate.
Expand Down
7 changes: 0 additions & 7 deletions test/e2e/fixtures.go
Original file line number Diff line number Diff line change
Expand Up @@ -478,13 +478,6 @@ func teardownFlowAggregator(tb testing.TB, data *TestData) {
}
}

// teardownIngressNginxController removes the ingress-nginx-controller deployed
// for a test. Removal is best-effort: a failure is logged on tb but does not
// fail the test, since teardown must not mask the test's own result.
func teardownIngressNginxController(tb testing.TB, data *TestData) {
	tb.Logf("Deleting ingress-nginx-controller")
	if deleteErr := data.deleteIngressNginxController(); deleteErr != nil {
		tb.Logf("Error when removing ingress-nginx-controller: %v", deleteErr)
	}
}

func teardownTest(tb testing.TB, data *TestData) {
exportLogs(tb, data, "beforeTeardown", true)
if empty, _ := IsDirEmpty(data.logsDirForTestCase); empty {
Expand Down
25 changes: 16 additions & 9 deletions test/e2e/flowaggregator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -167,14 +167,24 @@ func TestFlowAggregatorSecureConnection(t *testing.T) {
skipIfHasWindowsNodes(t)
options := []flowVisibilityTestOptions{
{
databaseURL: "http://clickhouse-clickhouse.flow-visibility.svc:8123",
ingress: false,
name: "http",
databaseURL: "tcp://clickhouse-clickhouse.flow-visibility.svc:9000",
secureConnection: false,
name: "tcp",
},
{
databaseURL: "https://clickhouse.localdev.me",
ingress: true,
name: "https",
databaseURL: "http://clickhouse-clickhouse.flow-visibility.svc:8123",
secureConnection: false,
name: "http",
},
{
databaseURL: "tls://clickhouse-clickhouse.flow-visibility.svc:9440",
secureConnection: true,
name: "tls",
},
{
databaseURL: "https://clickhouse-clickhouse.flow-visibility.svc:8443",
secureConnection: true,
name: "https",
},
}
for _, o := range options {
Expand All @@ -188,9 +198,6 @@ func TestFlowAggregatorSecureConnection(t *testing.T) {
// Execute teardownFlowAggregator later than teardownTest to ensure that the log
// of Flow Aggregator has been exported.
teardownFlowAggregator(t, data)
if o.ingress {
teardownIngressNginxController(t, data)
}
}()
podAIPs, podBIPs, _, _, _, err := createPerftestPods(data)
if err != nil {
Expand Down
97 changes: 5 additions & 92 deletions test/e2e/framework.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,14 +104,10 @@ const (
flowVisibilityYML string = "flow-visibility.yml"
flowVisibilityTLSYML string = "flow-visibility-tls.yml"
chOperatorYML string = "clickhouse-operator-install-bundle.yml"
ingressNginxControllerYML string = "ingress-nginx-controller.yml"
flowVisibilityCHPodName string = "chi-clickhouse-clickhouse-0-0-0"
flowVisibilityNamespace string = "flow-visibility"
ingressNginxNamespace string = "ingress-nginx"
defaultBridgeName string = "br-int"
monitoringNamespace string = "monitoring"
ingressClickHouseHost string = "clickhouse.localdev.me"
ingressSvcName = "ingress-nginx-controller"

antreaControllerCovBinary string = "antrea-controller-coverage"
antreaAgentCovBinary string = "antrea-agent-coverage"
Expand Down Expand Up @@ -205,9 +201,9 @@ type TestOptions struct {
}

type flowVisibilityTestOptions struct {
databaseURL string
ingress bool
name string
databaseURL string
secureConnection bool
name string
}

var testOptions TestOptions
Expand Down Expand Up @@ -802,11 +798,8 @@ func (data *TestData) deployFlowVisibilityClickHouse(o flowVisibilityTestOptions
}

flowVisYML := flowVisibilityYML
if o.ingress {
if o.secureConnection {
flowVisYML = flowVisibilityTLSYML
if err := testData.deployIngressNginxController(); err != nil {
return "", fmt.Errorf("error when deploying ingress-nginx-controller: %v", err)
}
}

rc, _, _, err := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl apply -f %s", chOperatorYML))
Expand Down Expand Up @@ -888,18 +881,6 @@ func (data *TestData) deleteClickHouseOperator() error {
return nil
}

// deleteIngressNginxController deletes the ingress-nginx controller resources
// (previously applied from ingressNginxControllerYML) by running kubectl on
// the control-plane Node. It logs how long the deletion took.
//
// It returns an error if the command could not be run or exited non-zero.
func (data *TestData) deleteIngressNginxController() error {
	startTime := time.Now()
	defer func() {
		log.Infof("Deleting ingress-nginx controller took %v", time.Since(startTime))
	}()
	rc, _, _, err := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl delete -f %s -n kube-system", ingressNginxControllerYML))
	if err != nil {
		return fmt.Errorf("error when deleting ingress-nginx controller: %w", err)
	}
	// The original code formatted err here even when it was nil (rc != 0 with
	// err == nil produced "... : <nil>"); report the exit code instead.
	if rc != 0 {
		return fmt.Errorf("error when deleting ingress-nginx controller: kubectl delete exited with code %d", rc)
	}
	return nil
}

// deployFlowAggregator deploys the Flow Aggregator with ipfix collector and clickHouse address.
func (data *TestData) deployFlowAggregator(ipfixCollector string, o flowVisibilityTestOptions) error {

Expand All @@ -914,23 +895,6 @@ func (data *TestData) deployFlowAggregator(ipfixCollector string, o flowVisibili
if err = data.mutateFlowAggregatorConfigMap(ipfixCollector, o); err != nil {
return err
}
if o.ingress {
svc, err := data.clientset.CoreV1().Services(ingressNginxNamespace).Get(context.TODO(), ingressSvcName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("unable to get Service %s: %v", ingressSvcName, err)
}
err = wait.PollImmediate(defaultInterval, defaultTimeout, func() (bool, error) {
err = data.updateFlowAggregatorDeployment(svc.Spec.ClusterIP)
if err != nil {
// Keep trying
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("not able to update Flow Aggregator Deployment after %v", defaultTimeout)
}
}
if rc, _, _, err = data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status deployment/%s --timeout=%v", flowAggregatorNamespace, flowAggregatorDeployment, 2*defaultTimeout)); err != nil || rc != 0 {
_, stdout, _, _ := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", flowAggregatorNamespace))
_, logStdout, _, _ := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s logs -l app=flow-aggregator", flowAggregatorNamespace))
Expand All @@ -947,40 +911,6 @@ func (data *TestData) deployFlowAggregator(ipfixCollector string, o flowVisibili
return nil
}

// deployIngressNginxController deploys the ingress-nginx-controller by
// applying ingressNginxControllerYML on the control-plane Node, then waits
// for the controller Pod to become available.
//
// It returns an error if the manifest cannot be applied or the controller
// does not become ready in time.
func (data *TestData) deployIngressNginxController() error {
	rc, _, _, err := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl apply -f %s", ingressNginxControllerYML))
	if err != nil || rc != 0 {
		// Include rc and err: the original message dropped both, which made
		// deployment failures hard to diagnose from the test logs.
		return fmt.Errorf("error when deploying the ingress-nginx-controller (rc %d, err %v); %s not available on the control-plane Node", rc, err, ingressNginxControllerYML)
	}
	// Check for ingress-nginx-controller Pod is running
	if err := data.waitForIngressNginxControllerPod(); err != nil {
		return fmt.Errorf("error when waiting ingress-nginx-controller Pod to be ready: %w", err)
	}
	return nil
}

// waitForIngressNginxControllerPod polls the ingress-nginx-controller
// Deployment until it reports no unavailable replicas, or until
// defaultTimeout elapses. A timeout is converted into a descriptive error;
// any other polling error is returned as-is.
func (data *TestData) waitForIngressNginxControllerPod() error {
	checkReady := func() (bool, error) {
		deployment, getErr := data.clientset.AppsV1().Deployments(ingressNginxNamespace).Get(context.TODO(), "ingress-nginx-controller", metav1.GetOptions{})
		if getErr != nil {
			return false, fmt.Errorf("error when retrieving ingress-nginx-controller deployment: %v", getErr)
		}
		// Ready once every replica is available; otherwise keep polling.
		return deployment.Status.UnavailableReplicas == 0, nil
	}
	pollErr := wait.PollImmediate(defaultInterval, defaultTimeout, checkReady)
	switch {
	case pollErr == wait.ErrWaitTimeout:
		return fmt.Errorf("ingress-nginx-controller pod is still unavailable after %v", defaultTimeout)
	case pollErr != nil:
		return pollErr
	default:
		return nil
	}
}

func (data *TestData) mutateFlowAggregatorConfigMap(ipfixCollectorAddr string, o flowVisibilityTestOptions) error {
configMap, err := data.GetFlowAggregatorConfigMap()
if err != nil {
Expand All @@ -1004,7 +934,7 @@ func (data *TestData) mutateFlowAggregatorConfigMap(ipfixCollectorAddr string, o
flowAggregatorConf.InactiveFlowRecordTimeout = aggregatorInactiveFlowRecordTimeout.String()
flowAggregatorConf.RecordContents.PodLabels = true
flowAggregatorConf.ClickHouse.DatabaseURL = o.databaseURL
if o.ingress {
if o.secureConnection {
flowAggregatorConf.ClickHouse.TLS.CACert = true
}

Expand Down Expand Up @@ -1041,23 +971,6 @@ func (data *TestData) GetFlowAggregatorConfigMap() (*corev1.ConfigMap, error) {
return configMap, nil
}

// updateFlowAggregatorDeployment sets a HostAlias on the Flow Aggregator
// Deployment so that ingressClickHouseHost resolves to the given IP inside
// the Flow Aggregator Pod, letting it reach ClickHouse through the ingress
// controller without external DNS.
//
// It returns an error if the Deployment cannot be retrieved or updated.
func (data *TestData) updateFlowAggregatorDeployment(ip string) error {
	deployment, err := data.clientset.AppsV1().Deployments(flowAggregatorNamespace).Get(context.TODO(), flowAggregatorDeployment, metav1.GetOptions{})
	if err != nil {
		// Wrap with %w so callers can unwrap the underlying API error.
		return fmt.Errorf("failed to retrieve Flow aggregator deployment: %w", err)
	}
	// Replace (not append to) any existing aliases: the test owns this entry.
	deployment.Spec.Template.Spec.HostAliases = []corev1.HostAlias{
		{
			IP:        ip,
			Hostnames: []string{ingressClickHouseHost},
		},
	}
	if _, err := data.clientset.AppsV1().Deployments(flowAggregatorNamespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}); err != nil {
		return fmt.Errorf("failed to update Flow aggregator deployment: %w", err)
	}
	return nil
}

// getAgentContainersRestartCount reads the restart count for every container across all Antrea
// Agent Pods and returns the sum of all the read values.
func (data *TestData) getAgentContainersRestartCount() (int, error) {
Expand Down

0 comments on commit c24a395

Please sign in to comment.