'. | `string` | n/a | yes |
| [resource\_chart\_version](#input\_resource\_chart\_version) | The dasmeta karpenter-resources chart version | `string` | `"0.1.0"` | no |
| [resource\_configs](#input\_resource\_configs) | Configurations to pass and override default ones for karpenter-resources chart. Check the helm chart available configs here: https://github.com/dasmeta/helm/tree/karpenter-resources-0.1.0/charts/karpenter-resources | `any` | `{}` | no |
-| [resource\_configs\_defaults](#input\_resource\_configs\_defaults) | Configurations to pass and override default ones for karpenter-resources chart. Check the helm chart available configs here: https://github.com/dasmeta/helm/tree/karpenter-resources-0.1.0/charts/karpenter-resources | object({
nodeClass = optional(any, {
amiFamily = "AL2" # Amazon Linux 2
detailedMonitoring = true
metadataOptions = {
httpEndpoint = "enabled"
httpProtocolIPv6 = "disabled"
httpPutResponseHopLimit = 2 # This is changed to disable IMDS access from containers not on the host network
httpTokens = "required"
}
})
nodeClassRef = optional(any, {
group = "karpenter.k8s.aws"
kind = "EC2NodeClass"
name = "default"
}),
requirements = optional(any, [
{
key = "karpenter.k8s.aws/instance-cpu"
operator = "Lt"
values = ["9"] # <=8 core cpu nodes
},
{
key = "karpenter.k8s.aws/instance-memory"
operator = "Lt"
values = ["33000"] # <=32 Gb memory nodes
},
{
key = "karpenter.k8s.aws/instance-memory"
operator = "Gt"
values = ["1000"] # >1Gb Gb memory nodes
},
{
key = "karpenter.k8s.aws/instance-generation"
operator = "Gt"
values = ["2"] # generation of ec2 instances >2 (like t3a.medium) are more performance and effectiveness
},
{
key = "kubernetes.io/arch"
operator = "In"
values = ["amd64"] # amd64 linux is main platform arch we will use
},
{
key = "karpenter.sh/capacity-type"
operator = "In"
values = ["spot", "on-demand"] # both spot and on-demand nodes, it will look at first available spot and if no then on-demand
}
])
disruption = optional(any, {
consolidationPolicy = "WhenEmptyOrUnderutilized"
consolidateAfter = "1m"
}),
limits = optional(any, {
cpu = 10
})
})
| `{}` | no |
+| [resource\_configs\_defaults](#input\_resource\_configs\_defaults) | Configurations to pass and override default ones for karpenter-resources chart. Check the helm chart available configs here: https://github.com/dasmeta/helm/tree/karpenter-resources-0.1.0/charts/karpenter-resources | object({
nodeClass = optional(any, {
amiFamily = "AL2" # Amazon Linux 2
detailedMonitoring = true
metadataOptions = {
httpEndpoint = "enabled"
httpProtocolIPv6 = "disabled"
httpPutResponseHopLimit = 2 # This is changed to disable IMDS access from containers not on the host network
httpTokens = "required"
}
blockDeviceMappings = [
{
deviceName = "/dev/xvda"
ebs = {
volumeSize = "100Gi"
volumeType = "gp3"
encrypted = true
}
}
]
})
nodeClassRef = optional(any, {
group = "karpenter.k8s.aws"
kind = "EC2NodeClass"
name = "default"
}),
requirements = optional(any, [
{
key = "karpenter.k8s.aws/instance-cpu"
operator = "Lt"
values = ["9"] # <=8 core cpu nodes
},
{
key = "karpenter.k8s.aws/instance-memory"
operator = "Lt"
values = ["33000"] # <=32 Gb memory nodes
},
{
key = "karpenter.k8s.aws/instance-memory"
operator = "Gt"
          values   = ["1000"] # >1GB memory nodes
},
{
key = "karpenter.k8s.aws/instance-generation"
operator = "Gt"
          values   = ["2"] # ec2 instance generations >2 (like t3a.medium) offer better performance and cost-efficiency
},
{
key = "kubernetes.io/arch"
operator = "In"
values = ["amd64"] # amd64 linux is main platform arch we will use
},
{
key = "karpenter.sh/capacity-type"
operator = "In"
          values   = ["spot", "on-demand"] # allow both spot and on-demand nodes; spot capacity is tried first, falling back to on-demand if none is available
}
])
disruption = optional(any, {
consolidationPolicy = "WhenEmptyOrUnderutilized"
consolidateAfter = "1m"
}),
limits = optional(any, {
cpu = 10
})
})
| `{}` | no |
| [subnet\_ids](#input\_subnet\_ids) | VPC subnet ids used for default Ec2NodeClass as subnet selector. | `list(string)` | n/a | yes |
| [wait](#input\_wait) | Whether use helm deploy with --wait flag | `bool` | `true` | no |
diff --git a/modules/karpenter/locals.tf b/modules/karpenter/locals.tf
index cddff22..28f7c28 100644
--- a/modules/karpenter/locals.tf
+++ b/modules/karpenter/locals.tf
@@ -10,8 +10,9 @@ locals {
amiSelectorTerms = [
{ id = data.aws_instance.ec2_from_eks_node_pool.ami }
]
- detailedMonitoring = var.resource_configs_defaults.nodeClass.detailedMonitoring
- metadataOptions = var.resource_configs_defaults.nodeClass.metadataOptions
+ detailedMonitoring = var.resource_configs_defaults.nodeClass.detailedMonitoring
+ metadataOptions = var.resource_configs_defaults.nodeClass.metadataOptions
+ blockDeviceMappings = var.resource_configs_defaults.nodeClass.blockDeviceMappings
}
nodePoolDefaultNodeClassRef = var.resource_configs_defaults.nodeClassRef
diff --git a/modules/karpenter/variables.tf b/modules/karpenter/variables.tf
index 0748c62..cf43f8d 100644
--- a/modules/karpenter/variables.tf
+++ b/modules/karpenter/variables.tf
@@ -101,6 +101,16 @@ variable "resource_configs_defaults" {
httpPutResponseHopLimit = 2 # This is changed to disable IMDS access from containers not on the host network
httpTokens = "required"
}
+ blockDeviceMappings = [
+ {
+ deviceName = "/dev/xvda"
+ ebs = {
+ volumeSize = "100Gi"
+ volumeType = "gp3"
+ encrypted = true
+ }
+ }
+ ]
})
nodeClassRef = optional(any, {
group = "karpenter.k8s.aws"