Added MW AWS scraper chart #16

Open · wants to merge 1 commit into master
24 changes: 24 additions & 0 deletions charts/mw-aws-data-scraper/Chart.yaml
@@ -0,0 +1,24 @@
apiVersion: v2
name: mw-aws-data-scraper
description: A Helm chart for the Middleware AWS data scraper

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 2.0.7

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.1.3"
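
For review context: the templates below resolve against the values listed here. This is a minimal values.yaml sketch; the key names are taken from the templates in this PR, while every concrete value is an illustrative assumption, not part of the change:

nameOverride: ""
fullnameOverride: ""
namespace:
  name: mw-agent-ns                        # assumed namespace
rbac:
  create: true
clusterRole:
  name: mw-aws-data-scraper-role           # assumed
clusterRoleBinding:
  name: mw-aws-data-scraper-role-binding   # assumed
serviceAccount:
  create: true
  name: ""                                 # empty falls back to the fullname helper
deployment:
  configMap:
    name: mw-aws-data-scraper-config       # assumed
clusterMetadata:
  name: demo-cluster                       # assumed cluster name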
62 changes: 62 additions & 0 deletions charts/mw-aws-data-scraper/templates/_helpers.tpl
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "mw-kube-agent.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "mw-kube-agent.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "mw-kube-agent.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "mw-kube-agent.labels" -}}
helm.sh/chart: {{ include "mw-kube-agent.chart" . }}
{{ include "mw-kube-agent.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "mw-kube-agent.selectorLabels" -}}
app.kubernetes.io/name: {{ include "mw-kube-agent.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "mw-kube-agent.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "mw-kube-agent.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
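
Rendered for a hypothetical release named mw-scraper with no overrides set, the label helpers above would produce roughly the following (illustrative output, assuming the corrected chart name):

helm.sh/chart: mw-aws-data-scraper-2.0.7
app.kubernetes.io/name: mw-aws-data-scraper
app.kubernetes.io/instance: mw-scraper
app.kubernetes.io/version: "0.1.3"
app.kubernetes.io/managed-by: Helm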
51 changes: 51 additions & 0 deletions charts/mw-aws-data-scraper/templates/clusterrole.yaml
@@ -0,0 +1,51 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    {{- include "mw-kube-agent.labels" . | nindent 4 }}
  name: {{ .Values.clusterRole.name }}
rules:
  # Allow the scraper to read metrics from the Metrics Server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

  # Core resources
  - apiGroups: [""]
    resources: ["nodes", "nodes/stats", "namespaces", "pods", "serviceaccounts", "services", "configmaps", "endpoints", "persistentvolumeclaims", "replicationcontrollers", "replicationcontrollers/scale", "persistentvolumes", "bindings", "events", "limitranges", "namespaces/status", "pods/log", "pods/status", "replicationcontrollers/status", "resourcequotas", "resourcequotas/status"]
    verbs: ["get", "list", "watch"]

  - apiGroups: ["apps"]
    resources: ["daemonsets", "deployments", "deployments/scale", "replicasets", "replicasets/scale", "statefulsets"]
    verbs: ["get", "list", "watch", "patch"]

  - apiGroups: ["autoscaling"]
    resources: ["horizontalpodautoscalers"]
    verbs: ["get", "list", "watch"]

  - apiGroups: ["batch"]
    resources: ["cronjobs", "jobs"]
    verbs: ["get", "list", "watch"]

  - apiGroups: ["extensions"]
    resources: ["daemonsets", "deployments", "deployments/scale", "networkpolicies", "replicasets", "replicasets/scale", "replicationcontrollers/scale"]
    verbs: ["get", "list", "watch"]

  - apiGroups: ["networking.k8s.io"]
    resources: ["ingresses", "networkpolicies"]
    verbs: ["get", "list", "watch"]

  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["get", "list", "watch"]

  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses", "volumeattachments"]
    verbs: ["get", "list", "watch"]

  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["clusterrolebindings", "clusterroles", "roles", "rolebindings"]
    verbs: ["get", "list", "watch"]
{{- end }}
15 changes: 15 additions & 0 deletions charts/mw-aws-data-scraper/templates/clusterrolebinding.yaml
@@ -0,0 +1,15 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ .Values.clusterRoleBinding.name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ .Values.clusterRole.name }}
subjects:
  - kind: ServiceAccount
    name: {{ include "mw-kube-agent.serviceAccountName" . }}
    namespace: {{ .Values.namespace.name }}
{{- end }}
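
With the sketch values above and the same hypothetical mw-scraper release, this template renders roughly as follows. Note how the subject name comes from the serviceAccountName helper (release name + chart name, since serviceAccount.name is empty in the sketch):

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: mw-aws-data-scraper-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: mw-aws-data-scraper-role
subjects:
  - kind: ServiceAccount
    name: mw-scraper-mw-aws-data-scraper
    namespace: mw-agent-ns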
222 changes: 222 additions & 0 deletions charts/mw-aws-data-scraper/templates/configmap-deployment.yaml
@@ -0,0 +1,222 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.deployment.configMap.name }}
  namespace: {{ .Values.namespace.name }}
  labels:
    {{- include "mw-kube-agent.labels" . | nindent 4 }}
data:
  otel-config: |
    receivers:
      k8s_cluster:
        auth_type: serviceAccount
        collection_interval: 15s
        node_conditions_to_report: [ Ready, DiskPressure, MemoryPressure, PIDPressure, NetworkUnavailable ]
        distribution: kubernetes
        allocatable_types_to_report: [ cpu, memory, ephemeral-storage, storage ]

      prometheus:
        config:
          scrape_configs:
            - job_name: "otel-collector"
              scrape_interval: 5s
              static_configs:
                - targets: ["0.0.0.0:8888"]

      fluentforward:
        endpoint: 0.0.0.0:8006
      otlp:
        protocols:
          grpc:
            endpoint: 0.0.0.0:9319
          http:
            endpoint: 0.0.0.0:9320
    exporters:
      logging:
        loglevel: warn
      otlp:
        endpoint: ${MW_TARGET}
    processors:
      k8sattributes:
        auth_type: "serviceAccount"
        passthrough: false
        filter:
          node_from_env_var: KUBE_NODE_NAME
        extract:
          metadata:
            - k8s.pod.name
            - k8s.pod.uid
            - k8s.deployment.name
            - k8s.namespace.name
            - k8s.node.name
            - k8s.pod.start_time
        pod_association:
          - sources:
              - from: resource_attribute
                name: k8s.pod.ip
          - sources:
              - from: resource_attribute
                name: k8s.pod.uid
          - sources:
              - from: connection
      resource:
        attributes:
          - key: host.id
            from_attribute: host.name
            action: upsert
          - key: mw.account_key
            action: insert
            value: ${MW_API_KEY}
          - key: agent.installation.time
            from_attribute: host.name
            action: insert
          - key: agent.installation.time
            value: ${MW_AGENT_INSTALLATION_TIME}
            action: update
          - key: k8s.cluster.name
            from_attribute: k8s.node.uid
            action: insert
          - key: k8s.cluster.name
            from_attribute: k8s.namespace.uid
            action: insert
          - key: k8s.cluster.name
            from_attribute: k8s.pod.uid
            action: insert
          - key: k8s.cluster.name
            from_attribute: k8s.container.name
            action: insert
          - key: k8s.cluster.name
            from_attribute: k8s.replicaset.uid
            action: insert
          - key: k8s.cluster.name
            from_attribute: k8s.statefulset.uid
            action: insert
          - key: k8s.cluster.name
            from_attribute: k8s.cronjob.uid
            action: insert
          - key: k8s.cluster.name
            from_attribute: k8s.job.uid
            action: insert
          - key: k8s.cluster.name
            from_attribute: k8s.daemonset.uid
            action: insert
          - key: k8s.cluster.name
            from_attribute: k8s.deployment.uid
            action: insert
      resource/hostmetrics:
        attributes:
          - key: is.k8s.node
            action: insert
            value: "yes"

      resource/cluster:
        attributes:
          - key: k8s.cluster.name
            action: update
            value: {{ .Values.clusterMetadata.name }}
          - key: host.id
            action: update
            from_attribute: k8s.node.name
          - key: host.name
            action: update
            from_attribute: k8s.node.name

      resource/logs:
        attributes:
          - key: service.name
            action: insert
            value: middleware-logs

      resourcedetection:
        detectors: [ env, system, docker ]
        system:
          hostname_sources: ["os"]
        timeout: 2s
        override: false
      batch:
      batch/2:
        send_batch_size: 2000
        timeout: 10s
      attributes/traces:
        actions:
          - key: mw.service.name.derived
            from_attribute: db.system
            action: insert
          - key: mw.service.name.derived
            from_attribute: messaging.system
            action: insert
          - key: mw.service.name.derived
            from_attribute: rpc.system
            action: insert
          - key: mw.service.name.derived
            from_attribute: http.scheme
            action: insert
          - key: mw.service.name.derived
            from_attribute: faas.trigger
            action: insert
      attributes/logs:
        actions:
          - key: source
            from_attribute: name
            action: upsert
          - key: source
            from_attribute: operator_type
            action: upsert
          - key: source
            from_attribute: log.file.name
            action: upsert
          - key: source
            from_attribute: fluent.tag
            action: upsert
          - key: source
            from_attribute: service.name
            action: upsert
          - key: source
            from_attribute: project.name
            action: upsert
          - key: source
            from_attribute: serviceName
            action: upsert
          - key: source
            from_attribute: projectName
            action: upsert
          - key: source
            from_attribute: pod_name
            action: upsert
          - key: source
            from_attribute: container_name
            action: upsert
          - key: source
            from_attribute: namespace
            action: upsert
    service:
      telemetry:
        logs:
          level: "fatal"
        metrics:
          address: 0.0.0.0:8888
      pipelines:
        traces/otlp:
          receivers: [ otlp ]
          processors: [ resourcedetection, resource, resource/cluster, attributes/traces, batch, batch/2 ]
          exporters: [ otlp ]
        logs/fluentforward:
          receivers: [ fluentforward ]
          processors: [ resourcedetection, resource, resource/cluster, attributes/logs, resource/logs, k8sattributes, batch, batch/2 ]
          exporters: [ otlp ]
        logs/otlp:
          receivers: [ otlp ]
          processors: [ resourcedetection, resource, resource/cluster, attributes/logs, resource/logs, k8sattributes, batch, batch/2 ]
          exporters: [ otlp ]
        metrics/prometheus:
          receivers: [ prometheus ]
          processors: [ resourcedetection, resource, k8sattributes, resource/cluster, batch, batch/2 ]
          exporters: [ otlp ]
        metrics/otlp:
          receivers: [ otlp ]
          processors: [ resourcedetection, resource, k8sattributes, resource/cluster, batch, batch/2 ]
          exporters: [ otlp ]
        metrics/k8s_cluster:
          receivers: [ k8s_cluster ]
          processors: [ resourcedetection, resource, k8sattributes, resource/cluster, batch, batch/2 ]
          exporters: [ otlp ]
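
The collector config above interpolates MW_TARGET, MW_API_KEY, and MW_AGENT_INSTALLATION_TIME, and the k8sattributes filter reads KUBE_NODE_NAME. The deployment that mounts this ConfigMap is not part of this diff, so as a hypothetical sketch (all values illustrative), its pod spec would need to supply something like:

env:
  - name: MW_TARGET                    # OTLP endpoint for the otlp exporter; value assumed
    value: "<middleware-otlp-endpoint>:443"
  - name: MW_API_KEY
    valueFrom:
      secretKeyRef:                    # hypothetical secret, not defined in this PR
        name: mw-api-key
        key: api-key
  - name: MW_AGENT_INSTALLATION_TIME   # illustrative; presumably stamped at install time
    value: "2024-01-01T00:00:00Z"
  - name: KUBE_NODE_NAME
    valueFrom:
      fieldRef:
        fieldPath: spec.nodeName       # downward API: the node the pod runs on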