From 9832753ebf10feeca37b1c3cddd06d116dbc1099 Mon Sep 17 00:00:00 2001 From: keval Date: Fri, 24 May 2024 18:58:09 +0530 Subject: [PATCH] Added MW AWS scraper chart --- charts/mw-aws-data-scraper/.helmignore | 0 charts/mw-aws-data-scraper/Chart.yaml | 24 ++ .../templates/_helpers.tpl | 62 +++++ .../templates/clusterrole.yaml | 51 ++++ .../templates/clusterrolebinding.yaml | 15 ++ .../templates/configmap-deployment.yaml | 222 ++++++++++++++++++ .../templates/cronjob.yaml | 44 ++++ charts/mw-aws-data-scraper/templates/hpa.yaml | 18 ++ .../templates/role-update.yaml | 14 ++ .../mw-aws-data-scraper/templates/role.yaml | 35 +++ .../templates/rolebinding-update.yaml | 15 ++ .../templates/rolebinding.yaml | 16 ++ .../mw-aws-data-scraper/templates/secret.yaml | 11 + .../templates/service.yaml | 24 ++ .../templates/serviceaccount-update.yaml | 7 + .../templates/serviceaccount.yaml | 13 + .../templates/statefulset-manager.yaml | 65 +++++ .../templates/statefulset.yaml | 65 +++++ charts/mw-aws-data-scraper/values.yaml | 96 ++++++++ 19 files changed, 797 insertions(+) create mode 100644 charts/mw-aws-data-scraper/.helmignore create mode 100644 charts/mw-aws-data-scraper/Chart.yaml create mode 100644 charts/mw-aws-data-scraper/templates/_helpers.tpl create mode 100644 charts/mw-aws-data-scraper/templates/clusterrole.yaml create mode 100644 charts/mw-aws-data-scraper/templates/clusterrolebinding.yaml create mode 100644 charts/mw-aws-data-scraper/templates/configmap-deployment.yaml create mode 100644 charts/mw-aws-data-scraper/templates/cronjob.yaml create mode 100644 charts/mw-aws-data-scraper/templates/hpa.yaml create mode 100644 charts/mw-aws-data-scraper/templates/role-update.yaml create mode 100644 charts/mw-aws-data-scraper/templates/role.yaml create mode 100644 charts/mw-aws-data-scraper/templates/rolebinding-update.yaml create mode 100644 charts/mw-aws-data-scraper/templates/rolebinding.yaml create mode 100644 charts/mw-aws-data-scraper/templates/secret.yaml create mode 
100644 charts/mw-aws-data-scraper/templates/service.yaml create mode 100644 charts/mw-aws-data-scraper/templates/serviceaccount-update.yaml create mode 100644 charts/mw-aws-data-scraper/templates/serviceaccount.yaml create mode 100644 charts/mw-aws-data-scraper/templates/statefulset-manager.yaml create mode 100644 charts/mw-aws-data-scraper/templates/statefulset.yaml create mode 100644 charts/mw-aws-data-scraper/values.yaml diff --git a/charts/mw-aws-data-scraper/.helmignore b/charts/mw-aws-data-scraper/.helmignore new file mode 100644 index 0000000..e69de29 diff --git a/charts/mw-aws-data-scraper/Chart.yaml b/charts/mw-aws-data-scraper/Chart.yaml new file mode 100644 index 0000000..08680c9 --- /dev/null +++ b/charts/mw-aws-data-scraper/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: mw-aws-data-scraper +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 2.0.7 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.1.3" diff --git a/charts/mw-aws-data-scraper/templates/_helpers.tpl b/charts/mw-aws-data-scraper/templates/_helpers.tpl new file mode 100644 index 0000000..53be278 --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "mw-kube-agent.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mw-kube-agent.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "mw-kube-agent.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "mw-kube-agent.labels" -}} +helm.sh/chart: {{ include "mw-kube-agent.chart" . }} +{{ include ".Values.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define ".Values.selectorLabels" -}} +app.kubernetes.io/name: {{ include "mw-kube-agent.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "mw-kube-agent.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "mw-kube-agent.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/charts/mw-aws-data-scraper/templates/clusterrole.yaml b/charts/mw-aws-data-scraper/templates/clusterrole.yaml new file mode 100644 index 0000000..dbda87f --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/clusterrole.yaml @@ -0,0 +1,51 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "mw-kube-agent.labels" . | nindent 4 }} + name: {{ .Values.clusterRole.name }} + namespace: {{.Values.namespace.name}} +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + + # Other resources + - apiGroups: [""] + resources: ["nodes", "nodes/stats", "namespaces", "pods", "serviceaccounts", "services", "configmaps", "endpoints", "persistentvolumeclaims", "replicationcontrollers", "replicationcontrollers/scale", "persistentvolumeclaims", "persistentvolumes", "bindings", "events", "limitranges", "namespaces/status", "pods/log", "pods/status", "replicationcontrollers/status", "resourcequotas", "resourcequotas/status"] + verbs: ["get", "list", "watch"] + + - apiGroups: ["apps"] + resources: ["daemonsets", "deployments", "deployments/scale", "replicasets", "replicasets/scale", "statefulsets"] + verbs: ["get", "list", "watch", "patch"] + + - apiGroups: ["autoscaling"] + resources: ["horizontalpodautoscalers"] + verbs: ["get", "list", "watch"] + + - apiGroups: ["batch"] + resources: ["cronjobs", "jobs"] + verbs: ["get", "list", "watch"] + + - apiGroups: ["extensions"] + resources: 
["daemonsets", "deployments", "deployments/scale", "networkpolicies", "replicasets", "replicasets/scale", "replicationcontrollers/scale"] + verbs: ["get", "list", "watch"] + + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses", "networkpolicies"] + verbs: ["get", "list", "watch"] + + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["get", "list", "watch"] + + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "volumeattachments"] + verbs: ["get", "list", "watch"] + + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterrolebindings", "clusterroles", "roles", "rolebindings", ] + verbs: ["get", "list", "watch"] +{{- end }} \ No newline at end of file diff --git a/charts/mw-aws-data-scraper/templates/clusterrolebinding.yaml b/charts/mw-aws-data-scraper/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..9eac17b --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{.Values.clusterRoleBinding.name}} + namespace: {{.Values.namespace.name}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{.Values.clusterRole.name}} +subjects: + - kind: ServiceAccount + name: {{ include "mw-kube-agent.serviceAccountName" . }} + namespace: {{.Values.namespace.name}} +{{- end }} diff --git a/charts/mw-aws-data-scraper/templates/configmap-deployment.yaml b/charts/mw-aws-data-scraper/templates/configmap-deployment.yaml new file mode 100644 index 0000000..ab7dd0c --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/configmap-deployment.yaml @@ -0,0 +1,222 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.deployment.configMap.name }} + namespace: {{.Values.namespace.name}} + labels: + {{- include "mw-kube-agent.labels" . 
| nindent 4 }} +data: + otel-config: | + receivers: + k8s_cluster: + auth_type: serviceAccount + collection_interval: 15s + node_conditions_to_report: [ Ready, DiskPressure, MemoryPressure, PIDPressure, NetworkUnavailable ] + distribution: kubernetes + allocatable_types_to_report: [ cpu, memory, ephemeral-storage, storage ] + + prometheus: + config: + scrape_configs: + - job_name: "otel-collector" + scrape_interval: 5s + static_configs: + - targets: ["0.0.0.0:8888"] + + fluentforward: + endpoint: 0.0.0.0:8006 + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:9319 + http: + endpoint: 0.0.0.0:9320 + exporters: + logging: + loglevel: warn + otlp: + endpoint: ${MW_TARGET} + processors: + k8sattributes: + auth_type: "serviceAccount" + passthrough: false + filter: + node_from_env_var: KUBE_NODE_NAME + extract: + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.deployment.name + - k8s.namespace.name + - k8s.node.name + - k8s.pod.start_time + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + - sources: + - from: resource_attribute + name: k8s.pod.uid + - sources: + - from: connection + resource: + attributes: + - key: host.id + from_attribute: host.name + action: upsert + - key: mw.account_key + action: insert + value: ${MW_API_KEY} + - key: agent.installation.time + from_attribute: host.name + action: insert + - key: agent.installation.time + value: ${MW_AGENT_INSTALLATION_TIME} + action: update + - key: k8s.cluster.name + from_attribute: k8s.node.uid + action: insert + - key: k8s.cluster.name + from_attribute: k8s.namespace.uid + action: insert + - key: k8s.cluster.name + from_attribute: k8s.pod.uid + action: insert + - key: k8s.cluster.name + from_attribute: k8s.container.name + action: insert + - key: k8s.cluster.name + from_attribute: k8s.replicaset.uid + action: insert + - key: k8s.cluster.name + from_attribute: k8s.statefulset.uid + action: insert + - key: k8s.cluster.name + from_attribute: k8s.cronjob.uid + action: insert + - key: 
k8s.cluster.name + from_attribute: k8s.job.uid + action: insert + - key: k8s.cluster.name + from_attribute: k8s.daemonset.uid + action: insert + - key: k8s.cluster.name + from_attribute: k8s.deployment.uid + action: insert + resource/hostmetrics: + attributes: + - key: is.k8s.node + action: insert + value: "yes" + + resource/cluster: + attributes: + - key: k8s.cluster.name + action: update + value: {{ .Values.clusterMetadata.name }} + - key: host.id + action: update + from_attribute: k8s.node.name + - key: host.name + action: update + from_attribute: k8s.node.name + + resource/logs: + attributes: + - key: service.name + action: insert + value: middleware-logs + + resourcedetection: + detectors: [ env, system, docker ] + system: + hostname_sources: ["os"] + timeout: 2s + override: false + batch: + batch/2: + send_batch_size: 2000 + timeout: 10s + attributes/traces: + actions: + - key: mw.service.name.derived + from_attribute: db.system + action: insert + - key: mw.service.name.derived + from_attribute: messaging.system + action: insert + - key: mw.service.name.derived + from_attribute: rpc.system + action: insert + - key: mw.service.name.derived + from_attribute: http.scheme + action: insert + - key: mw.service.name.derived + from_attribute: faas.trigger + action: insert + attributes/logs: + actions: + - key: source + from_attribute: name + action: upsert + - key: source + from_attribute: operator_type + action: upsert + - key: source + from_attribute: log.file.name + action: upsert + - key: source + from_attribute: fluent.tag + action: upsert + - key: source + from_attribute: service.name + action: upsert + - key: source + from_attribute: project.name + action: upsert + - key: source + from_attribute: serviceName + action: upsert + - key: source + from_attribute: projectName + action: upsert + - key: source + from_attribute: pod_name + action: upsert + - key: source + from_attribute: container_name + action: upsert + - key: source + from_attribute: namespace + 
action: upsert + service: + telemetry: + logs: + level: "fatal" + metrics: + address: 0.0.0.0:8888 + pipelines: + traces/otlp: + receivers: [ otlp ] + processors: [ resourcedetection,resource, resource/cluster, attributes/traces, batch, batch/2 ] + exporters: [ otlp ] + logs/fluentforward: + receivers: [ fluentforward ] + processors: [ resourcedetection, resource, resource/cluster, attributes/logs, resource/logs, k8sattributes, batch, batch/2 ] + exporters: [ otlp ] + logs/otlp: + receivers: [ otlp ] + processors: [ resourcedetection, resource, resource/cluster, attributes/logs, resource/logs, k8sattributes, batch, batch/2 ] + exporters: [ otlp ] + metrics/prometheus: + receivers: [ prometheus ] + processors: [ resourcedetection, resource, k8sattributes, resource/cluster, batch, batch/2] + exporters: [ otlp ] + metrics/otlp: + receivers: [ otlp ] + processors: [ resourcedetection, resource, k8sattributes, resource/cluster, batch, batch/2] + exporters: [ otlp ] + metrics/k8s_cluster: + receivers: [ k8s_cluster ] + processors: [ resourcedetection, resource, k8sattributes, resource/cluster, batch, batch/2] + exporters: [ otlp ] diff --git a/charts/mw-aws-data-scraper/templates/cronjob.yaml b/charts/mw-aws-data-scraper/templates/cronjob.yaml new file mode 100644 index 0000000..3c52b4a --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/cronjob.yaml @@ -0,0 +1,44 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: mw-kube-agent-update + namespace: {{.Values.namespace.name}} +spec: + schedule: "{{ .Values.mw.configCheckInterval }}" # Adjust the schedule as needed + jobTemplate: + spec: + template: + metadata: + labels: + app: mw-app + k8s-app: mw-app + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: mw-kube-agent + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ 
.Values.image.pullPolicy }} + args: + - mw-agent + - update + env: + - name: MW_TARGET + value: {{ .Values.mw.target }} + - name: MW_API_URL_FOR_CONFIG_CHECK + value: {{ .Values.apiUrlForConfigCheck }} + - name: MW_CONFIG_CHECK_INTERVAL + value: "60s" + - name: MW_KUBE_CLUSTER_NAME + value: {{ .Values.clusterMetadata.name }} + - name: MW_NAMESPACE + value: {{ .Values.namespace.name }} + - name: MW_API_KEY + value: {{ .Values.mw.apiKey }} + securityContext: + privileged: true + restartPolicy: OnFailure + serviceAccountName: mw-service-account-update diff --git a/charts/mw-aws-data-scraper/templates/hpa.yaml b/charts/mw-aws-data-scraper/templates/hpa.yaml new file mode 100644 index 0000000..81cd318 --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/hpa.yaml @@ -0,0 +1,18 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: otel-collector-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ .Values.deployment.name }} + minReplicas: 3 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 diff --git a/charts/mw-aws-data-scraper/templates/role-update.yaml b/charts/mw-aws-data-scraper/templates/role-update.yaml new file mode 100644 index 0000000..c6202d2 --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/role-update.yaml @@ -0,0 +1,14 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: mw-app + name: mw-role-update + namespace: {{ .Values.namespace.name }} +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "update"] +- apiGroups: ["apps"] + resources: ["daemonsets", "deployments"] + verbs: ["get", "update"] \ No newline at end of file diff --git a/charts/mw-aws-data-scraper/templates/role.yaml b/charts/mw-aws-data-scraper/templates/role.yaml new file mode 100644 index 0000000..e67f31b --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/role.yaml @@ -0,0 +1,35 @@ +{{- if 
.Values.rbac.create -}} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "mw-kube-agent.labels" . | nindent 4 }} + name: {{ .Values.role.name }} + namespace: {{.Values.namespace.name}} +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: [{{ .Values.secret.certSecretName }}, {{ .Values.secret.csrfSecretName }}, {{ .Values.secret.keyHolderSecretName }}] + verbs: ["get", "update", "delete"] + - apiGroups: [""] + resources: ["pods", "pods/exec"] + verbs: ["get", "list", "delete", "patch", "create"] + # Allow Dashboard to get and update 'mw-app-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: [{{ .Values.daemonset.configMap.name }}, {{ .Values.deployment.configMap.name }}] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + # Allow Dashboard to get metrics. + - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] +{{- end }} \ No newline at end of file diff --git a/charts/mw-aws-data-scraper/templates/rolebinding-update.yaml b/charts/mw-aws-data-scraper/templates/rolebinding-update.yaml new file mode 100644 index 0000000..74b617a --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/rolebinding-update.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: mw-app + name: mw-role-binding-update + namespace: {{ .Values.namespace.name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: mw-role-update +subjects: + - kind: ServiceAccount + name: mw-service-account-update + namespace: {{ .Values.namespace.name }} 
\ No newline at end of file diff --git a/charts/mw-aws-data-scraper/templates/rolebinding.yaml b/charts/mw-aws-data-scraper/templates/rolebinding.yaml new file mode 100644 index 0000000..2ebf62d --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/rolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Values.roleBinding.name }} + namespace: {{.Values.namespace.name}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Values.role.name }} +subjects: + - kind: ServiceAccount + name: {{ include "mw-kube-agent.serviceAccountName" . }} + namespace: {{.Values.namespace.name}} + +{{- end }} diff --git a/charts/mw-aws-data-scraper/templates/secret.yaml b/charts/mw-aws-data-scraper/templates/secret.yaml new file mode 100644 index 0000000..1fa1704 --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/secret.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: middleware-secret + namespace: {{ .Values.namespace.name }} + labels: + {{- include "mw-kube-agent.labels" . | nindent 4 }} +type: Opaque + +data: + api-key: {{ .Values.mw.apiKey | b64enc | quote }} \ No newline at end of file diff --git a/charts/mw-aws-data-scraper/templates/service.yaml b/charts/mw-aws-data-scraper/templates/service.yaml new file mode 100644 index 0000000..299d671 --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.service.name }} + namespace: {{.Values.namespace.name}} + labels: + {{- include "mw-kube-agent.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc.port }} + targetPort: {{ .Values.service.grpc.targetPort }} + name: grpc + - port: {{ .Values.service.grpc2.port }} + targetPort: {{ .Values.service.grpc2.targetPort }} + name: grpc2 + - port: {{ .Values.service.http.port }} + targetPort: {{ .Values.service.http.targetPort }} + name: http + - port: {{ .Values.service.fluent.port }} + targetPort: {{ .Values.service.fluent.targetPort }} + name: fluent + selector: + {{- include ".Values.selectorLabels" . | nindent 4 }} diff --git a/charts/mw-aws-data-scraper/templates/serviceaccount-update.yaml b/charts/mw-aws-data-scraper/templates/serviceaccount-update.yaml new file mode 100644 index 0000000..7e5a47a --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/serviceaccount-update.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: mw-app + name: mw-service-account-update + namespace: {{.Values.namespace.name}} diff --git a/charts/mw-aws-data-scraper/templates/serviceaccount.yaml b/charts/mw-aws-data-scraper/templates/serviceaccount.yaml new file mode 100644 index 0000000..d8ea6c4 --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mw-kube-agent.serviceAccountName" . }} + namespace: {{.Values.namespace.name}} + labels: + {{- include "mw-kube-agent.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/mw-aws-data-scraper/templates/statefulset-manager.yaml b/charts/mw-aws-data-scraper/templates/statefulset-manager.yaml new file mode 100644 index 0000000..192dbb8 --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/statefulset-manager.yaml @@ -0,0 +1,65 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.deployment.name }} + namespace: {{.Values.namespace.name}} +spec: + selector: + matchLabels: + {{- include ".Values.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include ".Values.selectorLabels" . | nindent 8 }} + spec: + tolerations: + # these tolerations are to have the daemonset runnable on control plane nodes + # remove them if your control plane nodes should not run pods + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - operator: Exists + effect: NoSchedule + - operator: Exists + effect: NoExecute + + volumes: + - name: mw-deployment-otel-config-volume + configMap: + name: mw-deployment-otel-config + items: + - key: otel-config + path: otel-config-deployment.yaml + containers: + - args: + - mw-agent + - update + volumeMounts: + - name: mw-deployment-otel-config-volume + mountPath: /app + env: + - name: MW_AWS_JOBS_LIST_URL + value: {{ .Values.mw.awsJobsListUrl | default "https://test-keval.free.beeceptor.com/aws-jobs" }} + - name: MW_TARGET + value: {{ .Values.mw.target }} + - name: MW_KUBE_CLUSTER_NAME + value: {{ .Values.clusterMetadata.name }} + - name: MW_NAMESPACE + value: {{ .Values.namespace.name }} + - name: MW_API_KEY + valueFrom: + secretKeyRef: + name: middleware-secret + key: api-key + resources: + {{- toYaml .Values.resources | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + name: {{ .Chart.Name }} + securityContext: + privileged: true + restartPolicy: 
Always + serviceAccountName: {{ include "mw-kube-agent.serviceAccountName" . }} diff --git a/charts/mw-aws-data-scraper/templates/statefulset.yaml b/charts/mw-aws-data-scraper/templates/statefulset.yaml new file mode 100644 index 0000000..6fe15f6 --- /dev/null +++ b/charts/mw-aws-data-scraper/templates/statefulset.yaml @@ -0,0 +1,65 @@ +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.deployment.name }} + namespace: {{.Values.namespace.name}} +spec: + selector: + matchLabels: + {{- include ".Values.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include ".Values.selectorLabels" . | nindent 8 }} + spec: + tolerations: + # these tolerations are to have the daemonset runnable on control plane nodes + # remove them if your control plane nodes should not run pods + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - operator: Exists + effect: NoSchedule + - operator: Exists + effect: NoExecute + + volumes: + - name: mw-deployment-otel-config-volume + configMap: + name: mw-deployment-otel-config + items: + - key: otel-config + path: otel-config-deployment.yaml + containers: + - args: + - mw-agent + - start + - --otel-config-file + - /app/otel-config-deployment.yaml + volumeMounts: + - name: mw-deployment-otel-config-volume + mountPath: /app + env: + - name: MW_TARGET + value: {{ .Values.mw.target }} + - name: MW_KUBE_CLUSTER_NAME + value: {{ .Values.clusterMetadata.name }} + - name: MW_NAMESPACE + value: {{ .Values.namespace.name }} + - name: MW_API_KEY + valueFrom: + secretKeyRef: + name: middleware-secret + key: api-key + resources: + {{- toYaml .Values.resources | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + name: {{ .Chart.Name }} + securityContext: + privileged: true + restartPolicy: Always + 
serviceAccountName: {{ include "mw-kube-agent.serviceAccountName" . }} diff --git a/charts/mw-aws-data-scraper/values.yaml b/charts/mw-aws-data-scraper/values.yaml new file mode 100644 index 0000000..05ffb85 --- /dev/null +++ b/charts/mw-aws-data-scraper/values.yaml @@ -0,0 +1,96 @@ +# Default values for mw-kube-agent. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +mw: + target: XXXXXXXXX + apiKey: XXXXXXXXX + configCheckInterval: "*/1 * * * *" + +clusterMetadata: + name: "unknown" + +replicaCount: 1 + +image: + repository: ghcr.io/middleware-labs/mw-kube-agent + pullPolicy: Always + # Overrides the image tag whose default is the chart appVersion. + tag: awscloudwatchmetricsreceiver-keval + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +rbac: + create: true + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: mw-service-account + +apiUrlForConfigCheck: https://app.middleware.io + +namespace: + name: mw-agent-ns + +secret: + type: Opaque + certSecretName: mw-app-certs + csrfSecretName: mw-app-csrf + keyHolderSecretName: mw-app-key-holder + + + +role: + name: mw-role + +roleBinding: + name: mw-role-binding + +clusterRole: + name: mw-cluster-role + +clusterRoleBinding: + name: mw-cluster-role-binding + +service: + name: mw-kube-agent-svc + type: NodePort + http: + port: 9320 + targetPort: 9320 + grpc: + port: 443 + targetPort: 8443 + grpc2: + port: 9319 + targetPort: 9319 + fluent: + port: 8006 + targetPort: 8006 + +daemonset: + name: mw-kube-agent + configMap: + name: mw-daemonset-otel-config +deployment: + name: mw-kube-agent + configMap: + name: mw-deployment-otel-config + +resources: + requests: + cpu: "100m" # Default CPU request + memory: "128Mi" # Default memory request + limits: + cpu: "200m" # Default CPU limit + memory: "256Mi" # Default memory limit + +selectorLabels: + k8s-app: mw-app \ No newline at end of file