-
-
Notifications
You must be signed in to change notification settings - Fork 75
/
main.tf
242 lines (209 loc) · 10.9 KB
/
main.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
# Shared values for the EKS worker resources below.
locals {
  enabled = module.this.enabled

  # Tag required so the EKS control plane can discover and manage these nodes.
  tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "owned"
  }

  # Worker role ARN/name come either from the caller-supplied instance profile
  # or from the IAM role this module creates. join("") flattens the 0/1-element
  # splat list into a plain string ("" when the resource is not created).
  workers_role_arn  = var.use_existing_aws_iam_instance_profile ? join("", data.aws_iam_instance_profile.default[*].role_arn) : join("", aws_iam_role.default[*].arn)
  workers_role_name = var.use_existing_aws_iam_instance_profile ? join("", data.aws_iam_instance_profile.default[*].role_name) : join("", aws_iam_role.default[*].name)

  # EC2 user data rendered from userdata.tpl; bootstraps the node and joins it
  # to the cluster, with caller-supplied hooks before/after joining.
  userdata = templatefile("${path.module}/userdata.tpl", {
    cluster_endpoint                = var.cluster_endpoint
    certificate_authority_data      = var.cluster_certificate_authority_data
    cluster_name                    = var.cluster_name
    bootstrap_extra_args            = var.bootstrap_extra_args
    kubelet_extra_args              = var.kubelet_extra_args
    before_cluster_joining_userdata = var.before_cluster_joining_userdata
    after_cluster_joining_userdata  = var.after_cluster_joining_userdata
  })
}
# Derives a consistent "...-workers" ID and merged tag set used to name and
# tag every resource created by this module.
module "label" {
  source  = "cloudposse/label/null"
  version = "0.25.0"

  attributes = ["workers"]
  tags       = local.tags
  context    = module.this.context
}
data "aws_partition" "current" {}
# Trust policy letting EC2 instances assume the worker node role.
# Rendered only when this module manages the IAM resources itself.
data "aws_iam_policy_document" "assume_role" {
  count = local.enabled && !var.use_existing_aws_iam_instance_profile ? 1 : 0

  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }
  }
}
# IAM role assumed by the EKS worker EC2 instances; skipped when the caller
# brings their own instance profile.
resource "aws_iam_role" "default" {
  count = local.enabled && !var.use_existing_aws_iam_instance_profile ? 1 : 0

  name               = module.label.id
  assume_role_policy = join("", data.aws_iam_policy_document.assume_role[*].json)

  tags = module.label.tags
}
# Core permissions for a node to register with and operate in an EKS cluster.
resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_policy" {
  count = local.enabled && !var.use_existing_aws_iam_instance_profile ? 1 : 0

  role       = join("", aws_iam_role.default[*].name)
  policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSWorkerNodePolicy"
}
# Permissions for the VPC CNI plugin to manage ENIs/IPs on behalf of pods.
resource "aws_iam_role_policy_attachment" "amazon_eks_cni_policy" {
  count = local.enabled && !var.use_existing_aws_iam_instance_profile ? 1 : 0

  role       = join("", aws_iam_role.default[*].name)
  policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKS_CNI_Policy"
}
# Read-only ECR access so nodes can pull container images.
resource "aws_iam_role_policy_attachment" "amazon_ec2_container_registry_read_only" {
  count = local.enabled && !var.use_existing_aws_iam_instance_profile ? 1 : 0

  role       = join("", aws_iam_role.default[*].name)
  policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
}
# Attaches caller-supplied managed policies to the module-created worker role.
# NOTE(review): count is driven by var.workers_role_policy_arns_count rather
# than length(var.workers_role_policy_arns) — presumably so the count stays
# known at plan time when the ARNs are computed; confirm before changing.
resource "aws_iam_role_policy_attachment" "existing_policies_attach_to_eks_workers_role" {
  count      = local.enabled && var.use_existing_aws_iam_instance_profile == false ? var.workers_role_policy_arns_count : 0
  policy_arn = var.workers_role_policy_arns[count.index]
  role       = join("", aws_iam_role.default[*].name)
}
# Instance profile that attaches the worker IAM role to the EC2 instances
# launched by the autoscale group; skipped when reusing an existing profile.
resource "aws_iam_instance_profile" "default" {
  count = local.enabled && var.use_existing_aws_iam_instance_profile == false ? 1 : 0
  name  = module.label.id
  role  = join("", aws_iam_role.default[*].name)

  # Fix: tag for consistency — every other taggable resource in this module
  # (role, security group, ASG) carries module.label.tags; this one did not.
  tags = module.label.tags
}
# Security group for worker nodes; created only when the caller does not
# supply an existing one via var.workers_security_group_id.
# NOTE(review): a fixed `name` (rather than `name_prefix` with
# create_before_destroy) means a change forcing replacement can collide with
# the old group's name — confirm this trade-off is acceptable.
resource "aws_security_group" "default" {
  count       = local.enabled && var.use_existing_security_group == false ? 1 : 0
  name        = module.label.id
  description = "Security Group for EKS worker nodes"
  vpc_id      = var.vpc_id
  tags        = module.label.tags
}
# Unrestricted outbound traffic from the worker nodes.
resource "aws_security_group_rule" "egress" {
  count = local.enabled && !var.use_existing_security_group ? 1 : 0

  type              = "egress"
  description       = "Allow all egress traffic"
  protocol          = "-1"
  from_port         = 0
  to_port           = 0
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = join("", aws_security_group.default[*].id)
}
# Node-to-node traffic: members of this security group may reach each other
# on any protocol/port.
resource "aws_security_group_rule" "ingress_self" {
  count = local.enabled && !var.use_existing_security_group ? 1 : 0

  type                     = "ingress"
  description              = "Allow nodes to communicate with each other"
  protocol                 = "-1"
  from_port                = 0
  to_port                  = 65535
  security_group_id        = join("", aws_security_group.default[*].id)
  source_security_group_id = join("", aws_security_group.default[*].id)
}
# Inbound traffic from the EKS control plane's security group so the API
# server can reach kubelets and pods on the nodes.
resource "aws_security_group_rule" "ingress_cluster" {
  count = local.enabled && var.cluster_security_group_ingress_enabled && !var.use_existing_security_group ? 1 : 0

  type                     = "ingress"
  description              = "Allow worker kubelets and pods to receive communication from the cluster control plane"
  protocol                 = "-1"
  from_port                = 0
  to_port                  = 65535
  security_group_id        = join("", aws_security_group.default[*].id)
  source_security_group_id = var.cluster_security_group_id
}
# One ingress rule per caller-supplied security group that should be allowed
# to reach the worker nodes.
resource "aws_security_group_rule" "ingress_security_groups" {
  count = local.enabled && !var.use_existing_security_group ? length(var.allowed_security_groups) : 0

  type                     = "ingress"
  description              = "Allow inbound traffic from existing Security Groups"
  protocol                 = "-1"
  from_port                = 0
  to_port                  = 65535
  security_group_id        = join("", aws_security_group.default[*].id)
  source_security_group_id = var.allowed_security_groups[count.index]
}
# Single ingress rule admitting all caller-supplied CIDR blocks; omitted when
# the list is empty (an empty cidr_blocks list is not a valid rule).
resource "aws_security_group_rule" "ingress_cidr_blocks" {
  count = local.enabled && !var.use_existing_security_group && length(var.allowed_cidr_blocks) > 0 ? 1 : 0

  type              = "ingress"
  description       = "Allow inbound traffic from CIDR blocks"
  protocol          = "-1"
  from_port         = 0
  to_port           = 0
  cidr_blocks       = var.allowed_cidr_blocks
  security_group_id = join("", aws_security_group.default[*].id)
}
# Latest EKS-optimized worker AMI matching the caller's name filter/regex.
# Skipped entirely when the caller supplies a custom image ID.
data "aws_ami" "eks_worker" {
  count       = local.enabled && var.use_custom_image_id == false ? 1 : 0
  most_recent = true
  name_regex  = var.eks_worker_ami_name_regex

  filter {
    name   = "name"
    values = [var.eks_worker_ami_name_filter]
  }

  # NOTE(review): hard-coded account publishing the EKS-optimized AMIs;
  # presumably valid for commercial regions only — other partitions
  # (China, GovCloud) use different publisher accounts; confirm if used there.
  owners = ["602401143452"] # Amazon
}
# Looks up the caller-provided instance profile (and its role, via the
# locals above) when reusing existing IAM instead of creating it here.
data "aws_iam_instance_profile" "default" {
  count = local.enabled && var.use_existing_aws_iam_instance_profile ? 1 : 0
  name  = var.aws_iam_instance_profile_name
}
# EC2 Auto Scaling Group running the worker nodes. Almost entirely a
# pass-through of this module's variables; the computed inputs are the AMI,
# instance profile, security groups, and rendered user data above.
module "autoscale_group" {
  source  = "cloudposse/ec2-autoscale-group/aws"
  version = "0.40.0"

  enabled = local.enabled
  tags    = merge(local.tags, var.autoscaling_group_tags)

  # Custom AMI wins over the looked-up EKS-optimized AMI.
  image_id                  = var.use_custom_image_id ? var.image_id : join("", data.aws_ami.eks_worker[*].id)
  # Module-created profile, or the caller's existing one.
  iam_instance_profile_name = var.use_existing_aws_iam_instance_profile == false ? join("", aws_iam_instance_profile.default[*].name) : var.aws_iam_instance_profile_name

  # Module-created (or caller-supplied) worker SG plus any extra SGs;
  # compact() drops the empty string produced when neither applies.
  security_group_ids = compact(
    concat(
      [
        var.use_existing_security_group == false ? join("", aws_security_group.default[*].id) : var.workers_security_group_id
      ],
      var.additional_security_group_ids
    )
  )

  # Bootstrap script rendered in locals.userdata.
  user_data_base64 = base64encode(local.userdata)

  # Instance/launch configuration (pass-through).
  instance_type                        = var.instance_type
  subnet_ids                           = var.subnet_ids
  min_size                             = var.min_size
  max_size                             = var.max_size
  associate_public_ip_address          = var.associate_public_ip_address
  block_device_mappings                = var.block_device_mappings
  credit_specification                 = var.credit_specification
  disable_api_termination              = var.disable_api_termination
  ebs_optimized                        = var.ebs_optimized
  elastic_gpu_specifications           = var.elastic_gpu_specifications
  instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
  instance_market_options              = var.instance_market_options
  mixed_instances_policy               = var.mixed_instances_policy
  key_name                             = var.key_name
  placement                            = var.placement
  enable_monitoring                    = var.enable_monitoring

  # Load balancing / health checks (pass-through).
  load_balancers            = var.load_balancers
  health_check_grace_period = var.health_check_grace_period
  health_check_type         = var.health_check_type
  min_elb_capacity          = var.min_elb_capacity
  wait_for_elb_capacity     = var.wait_for_elb_capacity
  target_group_arns         = var.target_group_arns

  # ASG lifecycle behavior (pass-through).
  default_cooldown          = var.default_cooldown
  force_delete              = var.force_delete
  termination_policies      = var.termination_policies
  suspended_processes       = var.suspended_processes
  placement_group           = var.placement_group
  enabled_metrics           = var.enabled_metrics
  metrics_granularity       = var.metrics_granularity
  wait_for_capacity_timeout = var.wait_for_capacity_timeout
  protect_from_scale_in     = var.protect_from_scale_in
  service_linked_role_arn   = var.service_linked_role_arn

  # CPU-based autoscaling policies and alarms (pass-through).
  autoscaling_policies_enabled            = var.autoscaling_policies_enabled
  scale_up_cooldown_seconds               = var.scale_up_cooldown_seconds
  scale_up_scaling_adjustment             = var.scale_up_scaling_adjustment
  scale_up_adjustment_type                = var.scale_up_adjustment_type
  scale_up_policy_type                    = var.scale_up_policy_type
  scale_down_cooldown_seconds             = var.scale_down_cooldown_seconds
  scale_down_scaling_adjustment           = var.scale_down_scaling_adjustment
  scale_down_adjustment_type              = var.scale_down_adjustment_type
  scale_down_policy_type                  = var.scale_down_policy_type
  cpu_utilization_high_evaluation_periods = var.cpu_utilization_high_evaluation_periods
  cpu_utilization_high_period_seconds     = var.cpu_utilization_high_period_seconds
  cpu_utilization_high_threshold_percent  = var.cpu_utilization_high_threshold_percent
  cpu_utilization_high_statistic          = var.cpu_utilization_high_statistic
  cpu_utilization_low_evaluation_periods  = var.cpu_utilization_low_evaluation_periods
  cpu_utilization_low_period_seconds      = var.cpu_utilization_low_period_seconds
  cpu_utilization_low_statistic           = var.cpu_utilization_low_statistic
  cpu_utilization_low_threshold_percent   = var.cpu_utilization_low_threshold_percent

  # Instance metadata service (IMDS) settings (pass-through).
  metadata_http_endpoint_enabled       = var.metadata_http_endpoint_enabled
  metadata_http_put_response_hop_limit = var.metadata_http_put_response_hop_limit
  metadata_http_tokens_required        = var.metadata_http_tokens_required

  max_instance_lifetime = var.max_instance_lifetime

  context = module.this.context
}