-
Notifications
You must be signed in to change notification settings - Fork 5
/
main.tf
164 lines (143 loc) · 4.89 KB
/
main.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
# Security group governing access to the NLB. Created only when security
# groups are enabled for this module (disable_security_groups = false).
resource "aws_security_group" "balancer" {
  count       = var.disable_security_groups ? 0 : 1
  name        = "nlb-${var.name}-sg-rds"
  description = "The security group used to manage access to NLB: ${var.name}"
  vpc_id      = var.vpc_id

  tags = merge(var.tags, {
    "Name"              = format("%s-%s-nlb", var.environment, var.name)
    "Env"               = var.environment
    "KubernetesCluster" = var.environment
  })
}
# One TCP ingress rule per NLB listener port, admitting the configured
# CIDR ranges. No rules are created when security groups are disabled.
resource "aws_security_group_rule" "ingress" {
  for_each = {
    for port, cfg in var.ports : port => cfg
    if !var.disable_security_groups
  }

  type              = "ingress"
  from_port         = each.key
  to_port           = each.key
  protocol          = "tcp"
  cidr_blocks       = var.security_group_ingress_cidr
  security_group_id = aws_security_group.balancer[0].id
}
# Catch-all egress: every protocol, every port, any destination.
# Skipped entirely when security groups are disabled.
resource "aws_security_group_rule" "egress" {
  count = var.disable_security_groups ? 0 : 1

  type              = "egress"
  from_port         = 0
  to_port           = 0
  protocol          = "-1" # "-1" means all protocols
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.balancer[0].id
}
# One TCP target group per entry in var.ports, keyed by the NLB listener
# port. Health checks probe the target port directly over TCP.
resource "aws_lb_target_group" "target_groups" {
  for_each = var.ports

  name                 = "${var.environment}-${var.name}-${each.key}"
  deregistration_delay = var.deregistration_delay
  port                 = each.value["target_port"]
  preserve_client_ip   = var.preserve_client_ip
  protocol             = "TCP"
  vpc_id               = var.vpc_id

  health_check {
    interval            = var.health_check_interval
    port                = each.value["target_port"]
    protocol            = "TCP"
    healthy_threshold   = var.healthy_threshold
    unhealthy_threshold = var.unhealthy_threshold
  }

  tags = merge(var.tags, {
    "Name"              = format("%s-%s-nlb", var.environment, var.name)
    "Env"               = var.environment
    "KubernetesCluster" = var.environment
  })
}
resource "aws_autoscaling_attachment" "asg_attachment" {
  /*
  Attaches every configured Auto Scaling group to every matching target group.
  Assumptions:
  1. A map (not a list) is used so that modifying the input only touches the
     changed attachments — with a list, removing an item would shift every
     later element left and force their recreation.
  2. There is exactly one attachment per ASG per target group.
  The for_each below builds a map of object(asg_name = string, target_group_index = string),
  with the map key being the unique key of the attachment, e.g.
  {
    "$NLBPORT-$NODEPORT-$ASGNAME" = {
      asg_name           = $ASGNAME
      target_group_index = $NLBPORT
    }
  }
  The key "$NLBPORT-$NODEPORT-$ASGNAME" is required because each attachment must be unique.
  asg_name           - the name of the ASG to attach
  target_group_index - the ports-map key of the target group to attach to
  Mechanics: the inner comprehension yields one {key => object} map per NLB
  port; flatten() turns that into a list of maps and merge(...) spreads the
  list so they collapse into a single flat map.
  */
  # NOTE(review): `alb_target_group_arn` was deprecated in AWS provider v4
  # (renamed `lb_target_group_arn`) and removed in v5 — confirm this module's
  # provider version constraint before renaming.
  for_each = merge(flatten(
  [for nlb_port, target in var.ports : {
  for asg_name in target["target_groups"] : "${nlb_port}-${target["target_port"]}-${asg_name}" => {
  asg_name = asg_name
  target_group_index = nlb_port
  }
  }]
  )...)
  autoscaling_group_name = each.value["asg_name"]
  alb_target_group_arn = aws_lb_target_group.target_groups[each.value.target_group_index].arn
}
# One TCP listener per configured port, forwarding straight to the
# target group created for that same ports-map key.
resource "aws_lb_listener" "listeners" {
  for_each = var.ports

  load_balancer_arn = aws_lb.balancer.arn
  port              = each.key
  protocol          = "TCP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.target_groups[each.key].arn
  }
}
# The network load balancer itself. When explicit subnet mappings are
# supplied the NLB is forced internal and placed on those mappings (which
# carry static private IPv4 addresses); otherwise it spans var.subnet_ids.
resource "aws_lb" "balancer" {
  name = "${var.environment}-${var.name}-nlb"
  # Native boolean instead of the deprecated quoted-string form "true";
  # plan-identical, but the idiomatic HCL2 spelling.
  enable_cross_zone_load_balancing = true
  internal                         = length(var.internal_nlb_subnet_mappings) > 0 ? true : var.internal
  load_balancer_type               = "network"
  # `subnets` and `subnet_mapping` are mutually exclusive on aws_lb, so
  # subnets must be null whenever mappings are provided.
  subnets = length(var.internal_nlb_subnet_mappings) > 0 ? null : var.subnet_ids

  dynamic "subnet_mapping" {
    for_each = var.internal_nlb_subnet_mappings
    content {
      subnet_id            = subnet_mapping.value.subnet_id
      private_ipv4_address = subnet_mapping.value.private_ipv4_address
    }
  }

  # Disabled for backwards compatibility with version 2 of this module.
  security_groups = var.disable_security_groups ? null : [aws_security_group.balancer[0].id]

  tags = merge(var.tags, {
    "Name"              = format("%s-%s", var.environment, var.name)
    "Env"               = var.environment
    "KubernetesCluster" = var.environment
  })
}
# Route 53 alias record pointing at the NLB. Uses var.dns_name when set,
# otherwise falls back to var.name.
resource "aws_route53_record" "dns" {
  zone_id = data.aws_route53_zone.selected.zone_id
  name    = var.dns_name != "" ? var.dns_name : var.name
  type    = var.dns_type

  alias {
    name                   = aws_lb.balancer.dns_name
    zone_id                = aws_lb.balancer.zone_id
    evaluate_target_health = true
  }
}