-
Notifications
You must be signed in to change notification settings - Fork 1
/
values.yaml
1449 lines (1176 loc) · 59.8 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# nameOverride -- Override the name of the chart (leave empty to use the chart name)
nameOverride: # ""
# fullnameOverride -- Override the fully qualified app name (leave empty to use the generated name)
fullnameOverride: # ""
# targetSystem -- Target OS of the deployment; presumably "linux" or "windows" — TODO confirm against chart docs
targetSystem: "linux"
# registry -- Registry to use for all Agent images (default gcr.io/datadoghq)
registry: gcr.io/datadoghq
## Core Datadog Agent configuration.
## NOTE: nesting below is restored from the `datadog.<key>` paths in the comments;
## the scraped source had all leading indentation stripped.
datadog:
  # datadog.apiKey -- Your Datadog API key (leave empty and supply it via a secret or env var instead of committing it)
  apiKey:
  # datadog.clusterName -- Name of the Kubernetes cluster, attached to data emitted by the Agents
  clusterName: testkube
  # datadog.site -- The site of the Datadog intake to send Agent data to
  ## Set to 'datadoghq.eu' to send data to the EU site.
  site: # datadoghq.com
  # datadog.dd_url -- The host of the Datadog intake server to send Agent data to, only set this option if you need the Agent to send data to a custom URL
  ## Overrides the site setting defined in "site".
  dd_url: # https://app.datadoghq.com
  # datadog.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, off
  logLevel: INFO
  # datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment
  ## ref: https://github.com/kubernetes/kube-state-metrics/tree/kube-state-metrics-helm-chart-2.13.2/charts/kube-state-metrics
  kubeStateMetricsEnabled: true
  kubeStateMetricsNetworkPolicy:
    # datadog.kubeStateMetricsNetworkPolicy.create -- If true, create a NetworkPolicy for kube state metrics
    create: false
  kubeStateMetricsCore:
    # datadog.kubeStateMetricsCore.enabled -- Enable the kubernetes_state_core check in the Cluster Agent (Requires Cluster Agent 1.12.0+)
    ## ref: https://docs.datadoghq.com/integrations/kubernetes_state_core
    enabled: false
    # datadog.kubeStateMetricsCore.ignoreLegacyKSMCheck -- Disable the auto-configuration of legacy kubernetes_state check (taken into account only when datadog.kubeStateMetricsCore.enabled is true)
    ## Disabling this field is not recommended as it results in enabling both checks, it can be useful though during the migration phase.
    ## Migration guide: https://docs.datadoghq.com/integrations/kubernetes_state_core/?tab=helm#migration-from-kubernetes_state-to-kubernetes_state_core
    ignoreLegacyKSMCheck: true
    # datadog.kubeStateMetricsCore.collectSecretMetrics -- Enable watching secret objects and collecting their corresponding metrics kubernetes_state.secret.*
    ## Configuring this field will change the default kubernetes_state_core check configuration and the RBACs granted to Datadog Cluster Agent to run the kubernetes_state_core check.
    collectSecretMetrics: true
    # datadog.kubeStateMetricsCore.useClusterCheckRunners -- For large clusters where the Kubernetes State Metrics Check Core needs to be distributed on dedicated workers.
    ## Configuring this field will create a separate deployment which will run Cluster Checks, including Kubernetes State Metrics Core.
    ## ref: https://docs.datadoghq.com/agent/cluster_agent/clusterchecksrunner?tab=helm
    useClusterCheckRunners: false
    # datadog.kubeStateMetricsCore.labelsAsTags -- Extra labels to collect from resources and to turn into datadog tag.
    ## It has the following structure:
    ## labelsAsTags:
    ##   <resource1>: # can be pod, deployment, node, etc.
    ##     <label1>: <tag1> # where <label1> is the kubernetes label and <tag1> is the datadog tag
    ##     <label2>: <tag2>
    ##   <resource2>:
    ##     <label3>: <tag3>
    ##
    ## Warning: the label must match the transformation done by kube-state-metrics,
    ## for example tags.datadoghq.com/version becomes label_tags_datadoghq_com_version.
    labelsAsTags: {}
    #  pod:
    #    app: app
    #  node:
    #    zone: zone
    #    team: team
  ## Manage Cluster checks feature
  ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/
  ## Autodiscovery via Kube Service annotations is automatically enabled
  clusterChecks:
    # datadog.clusterChecks.enabled -- Enable the Cluster Checks feature on both the cluster-agents and the daemonset
    enabled: true
    # datadog.clusterChecks.shareProcessNamespace -- Set the process namespace sharing on the cluster checks agent
    shareProcessNamespace: false
  # datadog.nodeLabelsAsTags -- Provide a mapping of Kubernetes Node Labels to Datadog Tags
  nodeLabelsAsTags: {}
  #  beta.kubernetes.io/instance-type: aws-instance-type
  #  kubernetes.io/role: kube_role
  #  <KUBERNETES_NODE_LABEL>: <DATADOG_TAG_KEY>
  # datadog.podLabelsAsTags -- Provide a mapping of Kubernetes Labels to Datadog Tags
  podLabelsAsTags: {}
  #  app: kube_app
  #  release: helm_release
  #  <KUBERNETES_LABEL>: <DATADOG_TAG_KEY>
  # datadog.podAnnotationsAsTags -- Provide a mapping of Kubernetes Annotations to Datadog Tags
  podAnnotationsAsTags: {}
  #  iam.amazonaws.com/role: kube_iamrole
  #  <KUBERNETES_ANNOTATIONS>: <DATADOG_TAG_KEY>
  # datadog.namespaceLabelsAsTags -- Provide a mapping of Kubernetes Namespace Labels to Datadog Tags
  namespaceLabelsAsTags: {}
  #  env: environment
  #  <KUBERNETES_NAMESPACE_LABEL>: <DATADOG_TAG_KEY>
  # datadog.tags -- List of static tags to attach to every metric, event and service check collected by this Agent.
  ## Learn more about tagging: https://docs.datadoghq.com/tagging/
  tags: []
  #  - "<KEY_1>:<VALUE_1>"
  #  - "<KEY_2>:<VALUE_2>"
  # datadog.checksCardinality -- Sets the tag cardinality for the checks run by the Agent.
  ## https://docs.datadoghq.com/getting_started/tagging/assigning_tags/?tab=containerizedenvironments#environment-variables
  checksCardinality: # low, orchestrator or high (not set by default to avoid overriding existing DD_CHECKS_TAG_CARDINALITY configurations, the default value in the Agent is low)
  # kubelet configuration
  kubelet:
    # datadog.kubelet.host -- Override kubelet IP
    host:
      valueFrom:
        fieldRef:
          fieldPath: status.hostIP
    # datadog.kubelet.tlsVerify -- Toggle kubelet TLS verification
    # @default -- true
    tlsVerify: false
    # datadog.kubelet.hostCAPath -- Path (on host) where the Kubelet CA certificate is stored
    # @default -- None (no mount from host)
    hostCAPath:
    # datadog.kubelet.agentCAPath -- Path (inside Agent containers) where the Kubelet CA certificate is stored
    # @default -- /var/run/host-kubelet-ca.crt if hostCAPath else /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
    agentCAPath:
    # datadog.kubelet.podLogsPath -- Path (on host) where the PODs logs are located
    # @default -- /var/log/pods on Linux, C:\var\log\pods on Windows
    podLogsPath:
  # datadog.expvarPort -- Specify the port to expose pprof and expvar to not interfere with the agentmetrics port from the cluster-agent, which defaults to 5000
  expvarPort: 6000
  ## dogstatsd configuration
  ## ref: https://docs.datadoghq.com/agent/kubernetes/dogstatsd/
  ## To emit custom metrics from your Kubernetes application, use DogStatsD.
  dogstatsd:
    # datadog.dogstatsd.port -- Override the Agent DogStatsD port
    ## Note: Make sure your client is sending to the same UDP port.
    port: 8125
    # datadog.dogstatsd.originDetection -- Enable origin detection for container tagging
    ## https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging
    originDetection: false
    # datadog.dogstatsd.tags -- List of static tags to attach to every custom metric, event and service check collected by Dogstatsd.
    ## Learn more about tagging: https://docs.datadoghq.com/tagging/
    tags: []
    #  - "<KEY_1>:<VALUE_1>"
    #  - "<KEY_2>:<VALUE_2>"
    # datadog.dogstatsd.tagCardinality -- Sets the tag cardinality relative to the origin detection
    ## https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging
    tagCardinality: low
    # datadog.dogstatsd.useSocketVolume -- Enable dogstatsd over Unix Domain Socket with an HostVolume
    ## ref: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/
    useSocketVolume: true
    # datadog.dogstatsd.socketPath -- Path to the DogStatsD socket
    socketPath: /var/run/datadog/dsd.socket
    # datadog.dogstatsd.hostSocketPath -- Host path to the DogStatsD socket
    hostSocketPath: /var/run/datadog/
    # datadog.dogstatsd.useHostPort -- Sets the hostPort to the same value of the container port
    ## Needs to be used for sending custom metrics.
    ## The ports need to be available on all hosts.
    ##
    ## WARNING: Make sure that hosts using this are properly firewalled otherwise
    ## metrics and traces are accepted from any host able to connect to this host.
    useHostPort: false
    # datadog.dogstatsd.useHostPID -- Run the agent in the host's PID namespace
    ## This is required for Dogstatsd origin detection to work.
    ## See https://docs.datadoghq.com/developers/dogstatsd/unix_socket/
    useHostPID: false
    # datadog.dogstatsd.nonLocalTraffic -- Enable this to make each node accept non-local statsd traffic (from outside of the pod)
    ## ref: https://github.com/DataDog/docker-dd-agent#environment-variables
    nonLocalTraffic: true
  # datadog.collectEvents -- Enables this to start event collection from the kubernetes API
  ## ref: https://docs.datadoghq.com/agent/kubernetes/#event-collection
  collectEvents: true
  # datadog.leaderElection -- Enables leader election mechanism for event collection
  leaderElection: true
  # datadog.leaderLeaseDuration -- Set the lease time for leader election in second
  leaderLeaseDuration: # 60
  ## Enable logs agent and provide custom configs
  logs:
    # datadog.logs.enabled -- Enables this to activate Datadog Agent log collection
    ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
    enabled: true
    # datadog.logs.containerCollectAll -- Enable this to allow log collection for all containers
    ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
    containerCollectAll: true
    # datadog.logs.containerCollectUsingFiles -- Collect logs from files in /var/log/pods instead of using container runtime API
    ## It's usually the most efficient way of collecting logs.
    ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
    containerCollectUsingFiles: true
    # datadog.logs.autoMultiLineDetection -- Allows the Agent to detect common multi-line patterns automatically.
    ## ref: https://docs.datadoghq.com/agent/logs/advanced_log_collection/?tab=configurationfile#automatic-multi-line-aggregation
    autoMultiLineDetection: false
  ## Enable apm agent and provide custom configs
  apm:
    # datadog.apm.socketEnabled -- Enable APM over Socket (Unix Socket or windows named pipe)
    ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
    socketEnabled: true
    # datadog.apm.portEnabled -- Enable APM over TCP communication (port 8126 by default)
    ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
    portEnabled: false
    # datadog.apm.enabled -- Enable this to enable APM and tracing, on port 8126
    # DEPRECATED. Use datadog.apm.portEnabled instead
    ## ref: https://github.com/DataDog/docker-dd-agent#tracing-from-the-host
    enabled: false
    # datadog.apm.port -- Override the trace Agent port
    ## Note: Make sure your client is sending to the same TCP port (APM traffic is TCP, see datadog.apm.portEnabled above).
    port: 8126
    # datadog.apm.useSocketVolume -- Enable APM over Unix Domain Socket
    # DEPRECATED. Use datadog.apm.socketEnabled instead
    ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
    useSocketVolume: false
    # datadog.apm.socketPath -- Path to the trace-agent socket
    socketPath: /var/run/datadog/apm.socket
    # datadog.apm.hostSocketPath -- Host path to the trace-agent socket
    hostSocketPath: /var/run/datadog/
  # datadog.envFrom -- Set environment variables for all Agents directly from configMaps and/or secrets
  ## envFrom to pass configmaps or secrets as environment
  envFrom: []
  #  - configMapRef:
  #      name: <CONFIGMAP_NAME>
  #  - secretRef:
  #      name: <SECRET_NAME>
  # datadog.env -- Set environment variables for all Agents
  ## The Datadog Agent supports many environment variables.
  ## ref: https://docs.datadoghq.com/agent/docker/?tab=standard#environment-variables
  env:
    - name: DD_IGNORE_AUTOCONF
      value: "etcd"
  # datadog.confd -- Provide additional check configurations (static and Autodiscovery)
  ## Each key becomes a file in /conf.d
  ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#optional-volumes
  ## ref: https://docs.datadoghq.com/agent/autodiscovery/
  confd:
    etcd.yaml: |-
      ad_identifiers:
        - etcd
      init_config:
      instances:
        - prometheus_url: https://%%host%%:2379/metrics
          tls_ca_cert: /host/etcd/certs/ca.crt
          tls_cert: /host/etcd/certs/healthcheck-client.crt
          tls_private_key: /host/etcd/certs/healthcheck-client.key
          ssl_verify: false
  #  redisdb.yaml: |-
  #    init_config:
  #    instances:
  #      - host: "name"
  #        port: "6379"
  #  kubernetes_state.yaml: |-
  #    ad_identifiers:
  #      - kube-state-metrics
  #    init_config:
  #    instances:
  #      - kube_state_url: http://%%host%%:8080/metrics
  # datadog.checksd -- Provide additional custom checks as python code
  ## Each key becomes a file in /checks.d
  ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#optional-volumes
  checksd: {}
  #  service.py: |-
  # datadog.dockerSocketPath -- Path to the docker socket
  dockerSocketPath: # /var/run/docker.sock
  # datadog.criSocketPath -- Path to the container runtime socket (if different from Docker)
  criSocketPath: # /var/run/containerd/containerd.sock
  # Configure how the agent interact with the host's container runtime
  containerRuntimeSupport:
    # datadog.containerRuntimeSupport.enabled -- Set this to false to disable agent access to container runtime.
    enabled: true
  ## Enable process agent and provide custom configs
  processAgent:
    # datadog.processAgent.enabled -- Set this to true to enable live process monitoring agent
    ## Note: /etc/passwd is automatically mounted to allow username resolution.
    ## ref: https://docs.datadoghq.com/graphing/infrastructure/process/#kubernetes-daemonset
    enabled: true
    # datadog.processAgent.processCollection -- Set this to true to enable process collection in process monitoring agent
    ## Requires processAgent.enabled to be set to true to have any effect
    processCollection: true
    # datadog.processAgent.stripProcessArguments -- Set this to scrub all arguments from collected processes
    ## Requires processAgent.enabled and processAgent.processCollection to be set to true to have any effect
    ## ref: https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows#process-arguments-scrubbing
    stripProcessArguments: false
    # datadog.processAgent.processDiscovery -- Enables or disables the process discovery check
    ## NOTE(review): original comment said "autodiscovery of integrations", which describes a different feature.
    processDiscovery: true
  ## Enable systemProbe agent and provide custom configs
  systemProbe:
    # datadog.systemProbe.debugPort -- Specify the port to expose pprof and expvar for system-probe agent
    debugPort: 0
    # datadog.systemProbe.enableConntrack -- Enable the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data
    ## Ref: http://conntrack-tools.netfilter.org/
    enableConntrack: true
    # datadog.systemProbe.seccomp -- Apply an ad-hoc seccomp profile to the system-probe agent to restrict its privileges
    ## Note that this will break `kubectl exec … -c system-probe -- /bin/bash`
    seccomp: localhost/system-probe
    # datadog.systemProbe.seccompRoot -- Specify the seccomp profile root directory
    seccompRoot: /var/lib/kubelet/seccomp
    # datadog.systemProbe.bpfDebug -- Enable logging for kernel debug
    bpfDebug: false
    # datadog.systemProbe.apparmor -- Specify a apparmor profile for system-probe
    apparmor: unconfined
    # datadog.systemProbe.enableTCPQueueLength -- Enable the TCP queue length eBPF-based check
    enableTCPQueueLength: false
    # datadog.systemProbe.enableOOMKill -- Enable the OOM kill eBPF-based check
    enableOOMKill: false
    # datadog.systemProbe.enableRuntimeCompiler -- Enable the runtime compiler for eBPF probes
    enableRuntimeCompiler: false
    # datadog.systemProbe.mountPackageManagementDirs -- Enables mounting of specific package management directories when runtime compilation is enabled
    mountPackageManagementDirs: []
    ## For runtime compilation to be able to download kernel headers, the host's package management folders
    ## must be mounted to the /host directory. For example, for Ubuntu & Debian the following mount would be necessary:
    #  - name: "apt-config-dir"
    #    hostPath: /etc/apt
    #    mountPath: /host/etc/apt
    ## If this list is empty, then all necessary package management directories (for all supported OSs) will be mounted.
    # datadog.systemProbe.osReleasePath -- Specify the path to your os-release file if you don't want to attempt mounting all `/etc/*-release` file by default
    osReleasePath:
    # datadog.systemProbe.runtimeCompilationAssetDir -- Specify a directory for runtime compilation assets to live in
    runtimeCompilationAssetDir: /var/tmp/datadog-agent/system-probe
    # datadog.systemProbe.collectDNSStats -- Enable DNS stat collection
    collectDNSStats: true
    # datadog.systemProbe.maxTrackedConnections -- the maximum number of tracked connections
    maxTrackedConnections: 131072
    # datadog.systemProbe.conntrackMaxStateSize -- the maximum size of the userspace conntrack cache
    conntrackMaxStateSize: 131072 # 2 * maxTrackedConnections by default, per https://github.com/DataDog/datadog-agent/blob/d1c5de31e1bba72dfac459aed5ff9562c3fdcc20/pkg/process/config/config.go#L229
    # datadog.systemProbe.conntrackInitTimeout -- the time to wait for conntrack to initialize before failing
    conntrackInitTimeout: 10s
  orchestratorExplorer:
    # datadog.orchestratorExplorer.enabled -- Set this to false to disable the orchestrator explorer
    ## This requires processAgent.enabled and clusterAgent.enabled to be set to true
    ## ref: https://docs.datadoghq.com/infrastructure/containers/orchestrator_explorer
    enabled: true
    # datadog.orchestratorExplorer.container_scrubbing -- Enable the scrubbing of containers in the kubernetes resource YAML for sensitive information
    ## The container scrubbing is taking significant resources during data collection.
    ## If you notice that the cluster-agent uses too much CPU in larger clusters
    ## turning this option off will improve the situation.
    container_scrubbing:
      enabled: true
  helmCheck:
    # datadog.helmCheck.enabled -- Set this to true to enable the Helm check (Requires Agent 7.35.0+ and Cluster Agent 1.19.0+)
    # This requires clusterAgent.enabled to be set to true
    enabled: false
    # datadog.helmCheck.collectEvents -- Set this to true to enable event collection in the Helm Check (Requires Agent 7.36.0+ and Cluster Agent 1.20.0+)
    # This requires datadog.helmCheck.enabled to be set to true
    collectEvents: false
  networkMonitoring:
    # datadog.networkMonitoring.enabled -- Enable network performance monitoring
    enabled: false
  ## Universal Service Monitoring is currently in private beta.
  ## See https://www.datadoghq.com/blog/universal-service-monitoring-datadog/ for more details and private beta signup.
  serviceMonitoring:
    # datadog.serviceMonitoring.enabled -- Enable Universal Service Monitoring
    enabled: false
  ## Enable security agent and provide custom configs
  securityAgent:
    compliance:
      # datadog.securityAgent.compliance.enabled -- Set to true to enable Cloud Security Posture Management (CSPM)
      enabled: false
      # datadog.securityAgent.compliance.configMap -- Contains CSPM compliance benchmarks that will be used
      configMap:
      # datadog.securityAgent.compliance.checkInterval -- Compliance check run interval
      checkInterval: 20m
    runtime:
      # datadog.securityAgent.runtime.enabled -- Set to true to enable Cloud Workload Security (CWS)
      enabled: false
      policies:
        # datadog.securityAgent.runtime.policies.configMap -- Contains CWS policies that will be used
        configMap:
      syscallMonitor:
        # datadog.securityAgent.runtime.syscallMonitor.enabled -- Set to true to enable the Syscall monitoring (recommended for troubleshooting only)
        enabled: false
  ## Manage NetworkPolicy
  networkPolicy:
    # datadog.networkPolicy.create -- If true, create NetworkPolicy for all the components
    create: false
    # datadog.networkPolicy.flavor -- Flavor of the network policy to use.
    # Can be:
    # * kubernetes for networking.k8s.io/v1/NetworkPolicy
    # * cilium for cilium.io/v2/CiliumNetworkPolicy
    flavor: kubernetes
    cilium:
      # datadog.networkPolicy.cilium.dnsSelector -- Cilium selector of the DNS server entity
      # @default -- kube-dns in namespace kube-system
      dnsSelector:
        toEndpoints:
          - matchLabels:
              "k8s:io.kubernetes.pod.namespace": kube-system
              "k8s:k8s-app": kube-dns
  ## Configure prometheus scraping autodiscovery
  ## ref: https://docs.datadoghq.com/agent/kubernetes/prometheus/
  prometheusScrape:
    # datadog.prometheusScrape.enabled -- Enable autodiscovering pods and services exposing prometheus metrics.
    enabled: true
    # datadog.prometheusScrape.serviceEndpoints -- Enable generating dedicated checks for service endpoints.
    serviceEndpoints: false
    # datadog.prometheusScrape.additionalConfigs -- Allows adding advanced openmetrics check configurations with custom discovery rules. (Requires Agent version 7.27+)
    additionalConfigs: []
    #  - autodiscovery:
    #      kubernetes_annotations:
    #        include:
    #          custom_include_label: 'true'
    #        exclude:
    #          custom_exclude_label: 'true'
    #      kubernetes_container_names:
    #        - my-app
    #    configurations:
    #      - send_distribution_buckets: true
    #        timeout: 5
    # datadog.prometheusScrape.version -- Version of the openmetrics check to schedule by default.
    # See https://datadoghq.dev/integrations-core/legacy/prometheus/#config-changes-between-versions for the differences between the two versions.
    # (Version 2 requires Agent version 7.34+)
    version: 2
  # datadog.ignoreAutoConfig -- List of integration to ignore auto_conf.yaml.
  ## ref: https://docs.datadoghq.com/agent/faq/auto_conf/
  ignoreAutoConfig: []
  #  - redisdb
  #  - kubernetes_state
  # datadog.containerExclude -- Exclude containers from the Agent
  # Autodiscovery, as a space-separated list
  ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#exclude-containers
  containerExclude: # "image:datadog/agent"
  # datadog.containerInclude -- Include containers in the Agent Autodiscovery,
  # as a space-separated list. If a container matches an include rule, it's
  # always included in the Autodiscovery
  ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers
  containerInclude:
  # datadog.containerExcludeLogs -- Exclude logs from the Agent Autodiscovery,
  # as a space-separated list
  containerExcludeLogs:
  # datadog.containerIncludeLogs -- Include logs in the Agent Autodiscovery, as
  # a space-separated list
  containerIncludeLogs:
  # datadog.containerExcludeMetrics -- Exclude metrics from the Agent
  # Autodiscovery, as a space-separated list
  containerExcludeMetrics:
  # datadog.containerIncludeMetrics -- Include metrics in the Agent
  # Autodiscovery, as a space-separated list
  containerIncludeMetrics:
  # datadog.excludePauseContainer -- Exclude pause containers from the Agent
  # Autodiscovery.
  ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#pause-containers
  excludePauseContainer: true
## This is the Datadog Cluster Agent implementation that handles cluster-wide
## metrics more cleanly, separates concerns for better rbac, and implements
## the external metrics API so you can autoscale HPAs based on datadog metrics
## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/
clusterAgent:
# clusterAgent.enabled -- Set this to false to disable Datadog Cluster Agent
enabled: true
# clusterAgent.shareProcessNamespace -- Set the process namespace sharing on the Datadog Cluster Agent
shareProcessNamespace: false
## Define the Datadog Cluster-Agent image to work with
image:
# clusterAgent.image.name -- Cluster Agent image name to use (relative to `registry`)
name: cluster-agent
# clusterAgent.image.tag -- Cluster Agent image tag to use
tag: 1.18.0
# clusterAgent.image.repository -- Override default registry + image.name for Cluster Agent
repository:
# clusterAgent.image.pullPolicy -- Cluster Agent image pullPolicy
pullPolicy: IfNotPresent
# clusterAgent.image.pullSecrets -- Cluster Agent repository pullSecret (ex: specify docker registry credentials)
## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
pullSecrets: []
# - name: "<REG_SECRET>"
# clusterAgent.securityContext -- Allows you to overwrite the default PodSecurityContext on the cluster-agent pods.
securityContext: {}
containers:
clusterAgent:
# clusterAgent.containers.clusterAgent.securityContext -- Specify securityContext on the cluster-agent container.
securityContext: {}
# clusterAgent.command -- Command to run in the Cluster Agent container as entrypoint
command: []
# clusterAgent.token -- Cluster Agent token is a preshared key between node agents and cluster agent (autogenerated if empty, needs to be at least 32 characters a-zA-z)
token: ""
# clusterAgent.tokenExistingSecret -- Existing secret name to use for Cluster Agent token
tokenExistingSecret: ""
# clusterAgent.replicas -- Specify the number of cluster agent replicas, if > 1 it allows the cluster agent to work in HA mode.
replicas: 1
## Provide Cluster Agent Deployment pod(s) RBAC configuration
rbac:
# clusterAgent.rbac.create -- If true, create & use RBAC resources
create: true
# clusterAgent.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if clusterAgent.rbac.create is false
serviceAccountName: default
# clusterAgent.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if clusterAgent.rbac.create is true
serviceAccountAnnotations: {}
## Provide Cluster Agent pod security configuration
podSecurity:
podSecurityPolicy:
# clusterAgent.podSecurity.podSecurityPolicy.create -- If true, create a PodSecurityPolicy resource for Cluster Agent pods
create: false
securityContextConstraints:
# clusterAgent.podSecurity.securityContextConstraints.create -- If true, create a SCC resource for Cluster Agent pods
create: false
# Enable the metricsProvider to be able to scale based on metrics in Datadog
metricsProvider:
# clusterAgent.metricsProvider.enabled -- Set this to true to enable Metrics Provider
enabled: false
# clusterAgent.metricsProvider.wpaController -- Enable informer and controller of the watermark pod autoscaler
## NOTE: You need to install the `WatermarkPodAutoscaler` CRD before
wpaController: false
# clusterAgent.metricsProvider.useDatadogMetrics -- Enable usage of DatadogMetric CRD to autoscale on arbitrary Datadog queries
## NOTE: It will install DatadogMetrics CRD automatically (it may conflict with previous installations)
useDatadogMetrics: false
# clusterAgent.metricsProvider.createReaderRbac -- Create `external-metrics-reader` RBAC automatically (to allow HPA to read data from Cluster Agent)
createReaderRbac: true
# clusterAgent.metricsProvider.aggregator -- Define the aggregator the cluster agent will use to process the metrics. The options are (avg, min, max, sum)
aggregator: avg
## Configuration for the service for the cluster-agent metrics server
service:
# clusterAgent.metricsProvider.service.type -- Set type of cluster-agent metrics server service
type: ClusterIP
# clusterAgent.metricsProvider.service.port -- Set port of cluster-agent metrics server service (Kubernetes >= 1.15)
port: 8443
# clusterAgent.metricsProvider.endpoint -- Override the external metrics provider endpoint. If not set, the cluster-agent defaults to `datadog.site`
endpoint: # https://api.datadoghq.com
# clusterAgent.env -- Set environment variables specific to Cluster Agent
## The Cluster-Agent supports many additional environment variables
## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options
env: []
# clusterAgent.envFrom -- Set environment variables specific to Cluster Agent from configMaps and/or secrets
## The Cluster-Agent supports many additional environment variables
## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options
envFrom: []
# - configMapRef:
# name: <CONFIGMAP_NAME>
# - secretRef:
# name: <SECRET_NAME>
admissionController:
# clusterAgent.admissionController.enabled -- Enable the admissionController to be able to inject APM/Dogstatsd config and standard tags (env, service, version) automatically into your pods
enabled: false
# clusterAgent.admissionController.mutateUnlabelled -- Enable injecting config without having the pod label 'admission.datadoghq.com/enabled="true"'
mutateUnlabelled: false
# clusterAgent.confd -- Provide additional cluster check configurations. Each key will become a file in /conf.d.
## ref: https://docs.datadoghq.com/agent/autodiscovery/
confd: {}
# mysql.yaml: |-
# cluster_check: true
# instances:
# - host: <EXTERNAL_IP>
# port: 3306
# username: datadog
# password: <YOUR_CHOSEN_PASSWORD>
# clusterAgent.advancedConfd -- Provide additional cluster check configurations. Each key is an integration containing several config files.
## ref: https://docs.datadoghq.com/agent/autodiscovery/
advancedConfd: {}
# mysql.d:
# 1.yaml: |-
# cluster_check: true
# instances:
# - host: <EXTERNAL_IP>
# port: 3306
# username: datadog
# password: <YOUR_CHOSEN_PASSWORD>
# 2.yaml: |-
# cluster_check: true
# instances:
# - host: <EXTERNAL_IP>
# port: 3306
# username: datadog
# password: <YOUR_CHOSEN_PASSWORD>
# clusterAgent.resources -- Datadog cluster-agent resource requests and limits.
resources: {}
# requests:
# cpu: 200m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 256Mi
# clusterAgent.priorityClassName -- Name of the priorityClass to apply to the Cluster Agent
priorityClassName: # system-cluster-critical
# clusterAgent.nodeSelector -- Allow the Cluster Agent Deployment to be scheduled on selected nodes
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# clusterAgent.tolerations -- Allow the Cluster Agent Deployment to schedule on tainted nodes (requires Kubernetes >= 1.6)
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# clusterAgent.affinity -- Allow the Cluster Agent Deployment to schedule using affinity rules
## By default, Cluster Agent Deployment Pods are forced to run on different Nodes.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# clusterAgent.healthPort -- Port number to use in the Cluster Agent for the healthz endpoint
healthPort: 5556
# clusterAgent.livenessProbe -- Override default Cluster Agent liveness probe settings
# @default -- Every 15s / 6 KO / 1 OK
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
# clusterAgent.readinessProbe -- Override default Cluster Agent readiness probe settings
# @default -- Every 15s / 6 KO / 1 OK
readinessProbe:
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
# clusterAgent.strategy -- Allow the Cluster Agent deployment to perform a rolling update on helm update
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
# clusterAgent.deploymentAnnotations -- Annotations to add to the cluster-agent's deployment
deploymentAnnotations: {}
# key: "value"
# clusterAgent.podAnnotations -- Annotations to add to the cluster-agent's pod(s)
podAnnotations: {}
# key: "value"
# clusterAgent.useHostNetwork -- Bind ports on the hostNetwork
## Useful for CNI networking where hostPort might
## not be supported. The ports need to be available on all hosts. It can be
## used for custom metrics instead of a service endpoint.
##
## WARNING: Make sure that hosts using this are properly firewalled otherwise
## metrics and traces are accepted from any host able to connect to this host.
#
useHostNetwork: false
# clusterAgent.dnsConfig -- Specify dns configuration options for datadog cluster agent containers e.g. ndots
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
dnsConfig: {}
# options:
# - name: ndots
# value: "1"
# clusterAgent.volumes -- Specify additional volumes to mount in the cluster-agent container
volumes: []
# - hostPath:
# path: <HOST_PATH>
# name: <VOLUME_NAME>
# clusterAgent.volumeMounts -- Specify additional volumes to mount in the cluster-agent container
volumeMounts: []
# - name: <VOLUME_NAME>
# mountPath: <CONTAINER_PATH>
# readOnly: true
# clusterAgent.datadog_cluster_yaml -- Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml)
datadog_cluster_yaml: {}
# clusterAgent.createPodDisruptionBudget -- Create pod disruption budget for Cluster Agent deployments
createPodDisruptionBudget: false
networkPolicy:
# clusterAgent.networkPolicy.create -- If true, create a NetworkPolicy for the cluster agent.
# DEPRECATED. Use datadog.networkPolicy.create instead
create: false
# clusterAgent.additionalLabels -- Adds labels to the Cluster Agent deployment and pods
additionalLabels: {}
# key: "value"
## This section lets you configure the agents deployed by this chart to connect to a Cluster Agent
## deployed independently of this chart (instead of the one it can deploy itself)
existingClusterAgent:
# existingClusterAgent.join -- Set this to true if you want the agents deployed by this chart to
# connect to a Cluster Agent deployed independently
join: false
# existingClusterAgent.tokenSecretName -- Existing secret name to use for the external Cluster Agent token
tokenSecretName: # <EXISTING_DCA_SECRET_NAME>
# existingClusterAgent.serviceName -- Existing service name to use for reaching the external Cluster Agent
serviceName: # <EXISTING_DCA_SERVICE_NAME>
# existingClusterAgent.clusterchecksEnabled -- Set this to false if you don't want the agents to run the cluster checks of the joined external cluster agent
clusterchecksEnabled: true
agents:
# agents.enabled -- You should keep Datadog DaemonSet enabled!
## The exceptional case could be a situation when you need to run
## single Datadog pod per every namespace, but you do not need to
## re-create a DaemonSet for every non-default namespace install.
## Note: StatsD and DogStatsD work over UDP, so you may not
## get guaranteed delivery of the metrics in Datadog-per-namespace setup!
#
enabled: true
# agents.shareProcessNamespace -- Set the process namespace sharing on the Datadog Daemonset
shareProcessNamespace: false
## Define the Datadog image to work with
image:
# agents.image.name -- Datadog Agent image name to use (relative to `registry`)
## use "dogstatsd" for Standalone Datadog Agent DogStatsD 7
name: agent
# agents.image.tag -- Define the Agent version to use
tag: 7.34.0
# agents.image.tagSuffix -- Suffix to append to Agent tag
## Ex:
## jmx to enable jmx fetch collection
## servercore to get Windows images based on servercore
tagSuffix: ""
# agents.image.repository -- Override default registry + image.name for Agent
repository:
# agents.image.doNotCheckTag -- Skip the version<>chart compatibility check
## By default, the version passed in agents.image.tag is checked
## for compatibility with the version of the chart.
## This boolean permits to completely skip this check.
## This is useful, for example, for custom tags that are not
## respecting semantic versioning
doNotCheckTag: # false
# agents.image.pullPolicy -- Datadog Agent image pull policy
pullPolicy: IfNotPresent
# agents.image.pullSecrets -- Datadog Agent repository pullSecret (ex: specify docker registry credentials)
## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
pullSecrets: []
# - name: "<REG_SECRET>"
## Provide Daemonset RBAC configuration
rbac:
# agents.rbac.create -- If true, create & use RBAC resources
create: true
# agents.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if agents.rbac.create is false
serviceAccountName: default
# agents.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if agents.rbac.create is true
serviceAccountAnnotations: {}
## Provide Daemonset PodSecurityPolicy configuration
podSecurity:
podSecurityPolicy:
# agents.podSecurity.podSecurityPolicy.create -- If true, create a PodSecurityPolicy resource for Agent pods
create: false
securityContextConstraints:
# agents.podSecurity.securityContextConstraints.create -- If true, create a SecurityContextConstraints resource for Agent pods
create: false
# agents.podSecurity.seLinuxContext -- Provide seLinuxContext configuration for PSP/SCC
# @default -- Must run as spc_t
seLinuxContext:
rule: MustRunAs
seLinuxOptions:
user: system_u
role: system_r
type: spc_t
level: s0
# agents.podSecurity.privileged -- If true, Allow to run privileged containers
privileged: false
# agents.podSecurity.capabilities -- Allowed capabilities
## capabilities must contain all agents.containers.*.securityContext.capabilities.
capabilities:
- SYS_ADMIN
- SYS_RESOURCE
- SYS_PTRACE
- NET_ADMIN
- NET_BROADCAST
- NET_RAW
- IPC_LOCK
- CHOWN
- AUDIT_CONTROL
- AUDIT_READ
# agents.podSecurity.allowedUnsafeSysctls -- Allowed unsafe sysctls
allowedUnsafeSysctls: []
# agents.podSecurity.volumes -- Allowed volumes types
volumes:
- configMap
- downwardAPI
- emptyDir
- hostPath
- secret
# agents.podSecurity.seccompProfiles -- Allowed seccomp profiles
seccompProfiles:
- "runtime/default"
- "localhost/system-probe"
apparmor:
# agents.podSecurity.apparmor.enabled -- If true, enable apparmor enforcement
## see: https://kubernetes.io/docs/tutorials/clusters/apparmor/
enabled: true
# agents.podSecurity.apparmorProfiles -- Allowed apparmor profiles
apparmorProfiles:
- "runtime/default"
- "unconfined"
# agents.podSecurity.defaultApparmor -- Default AppArmor profile for all containers but system-probe
defaultApparmor: runtime/default
containers:
agent:
# agents.containers.agent.env -- Additional environment variables for the agent container
env: []
# agents.containers.agent.envFrom -- Set environment variables specific to agent container from configMaps and/or secrets
envFrom: []
# - configMapRef:
# name: <CONFIGMAP_NAME>
# - secretRef:
# name: <SECRET_NAME>
# agents.containers.agent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off
## If not set, fall back to the value of datadog.logLevel.
logLevel: # INFO
# agents.containers.agent.resources -- Resource requests and limits for the agent container.
resources: {}
# requests:
# cpu: 200m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 256Mi
# agents.containers.agent.healthPort -- Port number to use in the node agent for the healthz endpoint
healthPort: 5555
# agents.containers.agent.livenessProbe -- Override default agent liveness probe settings
# @default -- Every 15s / 6 KO / 1 OK
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
# agents.containers.agent.readinessProbe -- Override default agent readiness probe settings
# @default -- Every 15s / 6 KO / 1 OK
readinessProbe:
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
# agents.containers.agent.securityContext -- Allows you to overwrite the default container SecurityContext for the agent container.
securityContext: {}
# agents.containers.agent.ports -- Allows to specify extra ports (hostPorts for instance) for this container
ports: []
processAgent:
# agents.containers.processAgent.env -- Additional environment variables for the process-agent container
env: []
# agents.containers.processAgent.envFrom -- Set environment variables specific to process-agent from configMaps and/or secrets
envFrom: []
# - configMapRef:
# name: <CONFIGMAP_NAME>
# - secretRef:
# name: <SECRET_NAME>
# agents.containers.processAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off
## If not set, fall back to the value of datadog.logLevel.
logLevel: # INFO
# agents.containers.processAgent.resources -- Resource requests and limits for the process-agent container
resources: {}
# requests:
# cpu: 100m
# memory: 200Mi
# limits:
# cpu: 100m
# memory: 200Mi
# agents.containers.processAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the process-agent container.
securityContext: {}
# agents.containers.processAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container
ports: []
traceAgent:
# agents.containers.traceAgent.env -- Additional environment variables for the trace-agent container
env: