Skip to content

Commit 702d6f9

Browse files
committed
🥇 First commit
0 parents  commit 702d6f9

10 files changed

+453
-0
lines changed

‎.gitignore

+2
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
.terraform*
terraform.tfstate*

‎README.md

+86
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
# (no name yet)

This is a Terraform configuration to deploy a Kubernetes cluster on
[Oracle Cloud Infrastructure][oci]. It creates a few virtual machines
and uses [kubeadm] to install a Kubernetes control plane on the first
machine, then joins the other machines as worker nodes.

By default, it deploys a 4-node cluster using ARM machines. Each machine
has 1 OCPU and 6 GB of RAM, which means that the cluster fits within
Oracle's (pretty generous, if you ask me) [free tier][freetier].

**It is not meant to run production workloads,**
but it's great if you want to learn Kubernetes with a "real" cluster
(i.e. a cluster with multiple nodes) without breaking the bank, *and*
if you want to develop or test applications on ARM.

## Getting started

1. Create an Oracle Cloud Infrastructure account.
2. Configure OCI credentials. (FIXME)
3. `terraform apply`

That's it!

At the end of the `terraform apply`, a `kubeconfig` file is generated
in this directory. To use your new cluster, you can do:

```bash
export KUBECONFIG=$PWD/kubeconfig
kubectl get nodes
```

The command above should show you 4 nodes, named `node1` to `node4`.

You can also log into the VMs. At the end of the Terraform output,
you should see a command that you can use to SSH into the first VM
(just copy-paste the command).

## Customization

Check `variables.tf` to see tweakable parameters. You can change the number
of nodes, the size of the nodes, or switch to Intel/AMD instances if you'd
like. Keep in mind that if you switch to Intel/AMD instances, you won't be
able to take advantage of the free tier.

## Stopping the cluster

`terraform destroy`

## Implementation details

This Terraform configuration:

- generates an OpenSSH key pair and a kubeadm token
- deploys 4 VMs using Ubuntu 20.04
- uses cloud-init to install and configure everything
- installs Docker and Kubernetes packages
- runs `kubeadm init` on the first VM
- runs `kubeadm join` on the other VMs
- installs the Weave CNI plugin
- transfers the `kubeconfig` file generated by `kubeadm`
- patches that file to use the public IP address of the machine

## Caveats

There is no cloud controller manager, which means that you cannot
create services with `type: LoadBalancer`; or rather, if you create
such services, their `EXTERNAL-IP` will remain `<pending>`.

To expose services, use `NodePort`.

Likewise, there is no ingress controller and no storage class.

(These might be added in a later iteration of this project.)

## Remarks

Oracle Cloud also has a managed Kubernetes service called
[Container Engine for Kubernetes (or OKE)][oke]. That service
doesn't have the caveats mentioned above; however, it's not part
of the free tier.

[freetier]: https://www.oracle.com/cloud/free/
[kubeadm]: https://kubernetes.io/docs/reference/setup-tools/kubeadm/
[oci]: https://www.oracle.com/cloud/compute/
[oke]: https://www.oracle.com/cloud-native/container-engine-kubernetes/

‎cloudinit.tf

+164
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,164 @@
# Packages installed on every node by cloud-init (see the cloud-config
# part below). NOTE: the list order matters indirectly — it is fed
# through yamlencode() into the rendered user_data, so reordering it
# would change the instances' user_data and force their replacement.
locals {
  packages = [
    "apt-transport-https",        # HTTPS transport for the Kubernetes apt repo
    "build-essential",
    "ca-certificates",
    "curl",
    "docker.io",                  # container runtime used by kubelet
    "jq",
    "kubeadm",                    # from the Kubernetes apt repo configured below
    "kubelet",
    "lsb-release",
    "make",
    "prometheus-node-exporter",
    "python3-pip",
    "software-properties-common",
    "tmux",
    "tree",
    "unzip",
  ]
}
# One multi-part cloud-init document per node (keyed like local.nodes).
# cloud-init executes the parts in order:
#   1. cloud-config: hostname, packages, Kubernetes apt repo, a "k8s"
#      user, and the kubeadm token/config files
#   2. shell script opening inbound traffic (the images block everything
#      but SSH by default)
#   3. kubeadm init (control plane node) OR kubeadm join (worker nodes)
data "cloudinit_config" "_" {
  for_each = local.nodes

  part {
    filename     = "cloud-config.cfg"
    content_type = "text/cloud-config"
    # NOTE(review): data.http...body is the pre-3.0 attribute name of the
    # http provider (renamed response_body in 3.x) — confirm the pinned
    # provider version.
    # NOTE(review): the apt.kubernetes.io / packages.cloud.google.com
    # repository has since been decommissioned upstream in favor of
    # pkgs.k8s.io — verify before reusing this configuration.
    # @@PUBLIC_IP_ADDRESS@@ in certSANs is a placeholder substituted by
    # kubeadm-init.sh below, once the VM knows its own public IP.
    content = <<-EOF
      hostname: ${each.value.node_name}
      package_update: true
      package_upgrade: false
      packages:
      ${yamlencode(local.packages)}
      apt:
        sources:
          kubernetes.list:
            source: "deb https://apt.kubernetes.io/ kubernetes-xenial main"
            key: |
              ${indent(8, data.http.apt_repo_key.body)}
      users:
      - default
      - name: k8s
        primary_group: k8s
        groups: docker
        home: /home/k8s
        shell: /bin/bash
        sudo: ALL=(ALL) NOPASSWD:ALL
        ssh_authorized_keys:
        - ${tls_private_key.ssh.public_key_openssh}
      write_files:
      - path: /etc/kubeadm_token
        owner: "root:root"
        permissions: "0600"
        content: ${local.kubeadm_token}
      - path: /etc/kubeadm_config.yaml
        owner: "root:root"
        permissions: "0600"
        content: |
          kind: InitConfiguration
          apiVersion: kubeadm.k8s.io/v1beta2
          bootstrapTokens:
          - token: ${local.kubeadm_token}
          ---
          kind: KubeletConfiguration
          apiVersion: kubelet.config.k8s.io/v1beta1
          cgroupDriver: cgroupfs
          ---
          kind: ClusterConfiguration
          apiVersion: kubeadm.k8s.io/v1beta2
          apiServer:
            certSANs:
            - @@PUBLIC_IP_ADDRESS@@
      - path: /home/k8s/.ssh/id_rsa
        defer: true
        owner: "k8s:k8s"
        permissions: "0600"
        content: |
          ${indent(4, tls_private_key.ssh.private_key_pem)}
      - path: /home/k8s/.ssh/id_rsa.pub
        defer: true
        owner: "k8s:k8s"
        permissions: "0600"
        content: |
          ${indent(4, tls_private_key.ssh.public_key_openssh)}
    EOF
  }

  # By default, all inbound traffic is blocked
  # (except SSH) so we need to change that.
  part {
    filename     = "allow-inbound-traffic.sh"
    content_type = "text/x-shellscript"
    content      = <<-EOF
      #!/bin/sh
      sed -i "s/-A INPUT -j REJECT --reject-with icmp-host-prohibited//" /etc/iptables/rules.v4
      netfilter-persistent start
    EOF
  }

  # Control plane only: substitute the node's public IP into the kubeadm
  # config (for certSANs), init the cluster, and install the Weave CNI.
  # NOTE(review): cloud.weave.works has been shut down upstream; the
  # manifest URL may no longer resolve — verify before reuse.
  dynamic "part" {
    for_each = each.value.role == "controlplane" ? ["yes"] : []
    content {
      filename     = "kubeadm-init.sh"
      content_type = "text/x-shellscript"
      content      = <<-EOF
        #!/bin/sh
        PUBLIC_IP_ADDRESS=$(curl https://icanhazip.com/)
        sed -i s/@@PUBLIC_IP_ADDRESS@@/$PUBLIC_IP_ADDRESS/ /etc/kubeadm_config.yaml
        kubeadm init --config=/etc/kubeadm_config.yaml --ignore-preflight-errors=NumCPU
        export KUBECONFIG=/etc/kubernetes/admin.conf
        kubever=$(kubectl version | base64 | tr -d '\n')
        kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=$kubever
        mkdir -p /home/k8s/.kube
        cp $KUBECONFIG /home/k8s/.kube/config
        chown -R k8s:k8s /home/k8s/.kube
      EOF
    }
  }

  # Workers: join the control plane at node1's fixed private IP.
  # CA verification is skipped, hence the "unsafe" flag — acceptable for
  # this lab setup since everything lives in one private subnet.
  dynamic "part" {
    for_each = each.value.role == "worker" ? ["yes"] : []
    content {
      filename     = "kubeadm-join.sh"
      content_type = "text/x-shellscript"
      content      = <<-EOF
        #!/bin/sh
        kubeadm join --discovery-token-unsafe-skip-ca-verification --token ${local.kubeadm_token} ${local.nodes[1].ip_address}:6443
      EOF
    }
  }
}
# GPG signing key of the Kubernetes apt repository, fetched at plan time
# and embedded into the cloud-config above.
# NOTE(review): this Google-hosted repository has been deprecated
# upstream in favor of pkgs.k8s.io — verify the URL still serves the key.
data "http" "apt_repo_key" {
  url = "https://packages.cloud.google.com/apt/doc/apt-key.gpg.asc"
}
# kubeadm bootstrap tokens have a fixed shape: six lowercase
# alphanumerics, a dot, then sixteen lowercase alphanumerics.
# Each half is generated separately and glued together below.

# First half of the token: 6 lowercase letters/digits.
resource "random_string" "token1" {
  length  = 6
  lower   = true
  number  = true
  special = false
  upper   = false
}
# Second half of the token: 16 lowercase letters/digits.
resource "random_string" "token2" {
  length  = 16
  lower   = true
  number  = true
  special = false
  upper   = false
}
# Assemble the "xxxxxx.yyyyyyyyyyyyyyyy" bootstrap token from its halves.
locals {
  kubeadm_token = "${random_string.token1.result}.${random_string.token2.result}"
}

‎kubeconfig.tf

+37
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
# Blocks until the Kubernetes API server on the control plane node
# answers on port 6443, so the kubeconfig retrieval below doesn't race
# the cluster bootstrap.
resource "null_resource" "wait_for_kube_apiserver" {
  depends_on = [oci_core_instance._[1]]

  provisioner "local-exec" {
    # --insecure: the API server certificate is self-signed and we only
    #   care about reachability here, not identity.
    # --silent --output /dev/null: don't spam the Terraform output once
    #   per second while polling.
    # --max-time 5: bound each probe so a black-holed SYN (firewall
    #   still closed) can't stall an iteration for minutes.
    command = <<-EOT
      while ! curl --silent --insecure --max-time 5 --output /dev/null https://${oci_core_instance._[1].public_ip}:6443; do
        sleep 1
      done
    EOT
  }
}
# Fetches /etc/kubernetes/admin.conf from the control plane node over SSH.
# The external data source protocol requires a single JSON object on
# stdout, so the file is base64-encoded to survive JSON quoting; it is
# decoded again in local_file.kubeconfig below.
# Notes on the script:
# - `cat >/dev/null` drains the query JSON Terraform sends on stdin.
# - Host key checking is disabled because the VM was just created and
#   its key is unknown; acceptable for this throwaway lab setup.
# - NOTE(review): `base64 -w0` is a GNU coreutils flag — this likely
#   fails on macOS/BSD where the option is `-b 0`; confirm the intended
#   platforms for running `terraform apply`.
data "external" "kubeconfig" {
  depends_on = [null_resource.wait_for_kube_apiserver]
  program = [
    "sh",
    "-c",
    <<-EOT
      set -e
      cat >/dev/null
      echo '{"base64": "'$(
      ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
          -l k8s -i ${local_file.ssh_private_key.filename} \
          ${oci_core_instance._[1].public_ip} \
          sudo cat /etc/kubernetes/admin.conf | base64 -w0
      )'"}'
    EOT
  ]
}
# Writes the retrieved admin kubeconfig to ./kubeconfig, then rewrites
# its cluster endpoint to the node's public IP (kubeadm generated it
# with an address not reachable from outside). The public IP was added
# to the API server's certSANs in cloudinit.tf, so TLS verification
# still succeeds against the patched endpoint.
resource "local_file" "kubeconfig" {
  content         = base64decode(data.external.kubeconfig.result.base64)
  filename        = "kubeconfig"
  file_permission = "0600" # contains cluster-admin credentials
  provisioner "local-exec" {
    command = "kubectl --kubeconfig=kubeconfig config set-cluster kubernetes --server=https://${oci_core_instance._[1].public_ip}:6443"
  }
}

‎main.tf

+57
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
# Dedicated compartment holding every resource of this cluster.
# enable_delete lets `terraform destroy` remove the compartment itself
# (OCI refuses to delete non-empty/protected compartments otherwise).
resource "oci_identity_compartment" "_" {
  name          = var.name
  description   = var.name
  enable_delete = true
}
# Shorthand used by every resource in this configuration.
locals {
  compartment_id = oci_identity_compartment._.id
}
# Availability domains visible to the tenancy; all instances are placed
# in the first one (see oci_core_instance below).
data "oci_identity_availability_domains" "_" {
  compartment_id = local.compartment_id
}
# Images matching the chosen shape; images[0] (the first result) is used
# below. The commented pair switches the cluster to Oracle Linux instead
# of Ubuntu.
data "oci_core_images" "_" {
  compartment_id           = local.compartment_id
  shape                    = var.shape
  operating_system         = "Canonical Ubuntu"
  operating_system_version = "20.04"
  #operating_system = "Oracle Linux"
  #operating_system_version = "7.9"
}
# One VM per entry in local.nodes, all in the first availability domain.
resource "oci_core_instance" "_" {
  for_each            = local.nodes
  display_name        = each.value.node_name
  availability_domain = data.oci_identity_availability_domains._.availability_domains[0].name
  compartment_id      = local.compartment_id
  shape               = var.shape
  shape_config {
    # Flexible-shape sizing; the defaults are meant to fit the free tier
    # (see variables.tf).
    memory_in_gbs = var.memory_in_gbs_per_node
    ocpus         = var.ocpus_per_node
  }
  source_details {
    source_id   = data.oci_core_images._.images[0].id # first matching image
    source_type = "image"
  }
  create_vnic_details {
    subnet_id  = oci_core_subnet._.id
    private_ip = each.value.ip_address # fixed IP so workers can reach node1
  }
  metadata = {
    ssh_authorized_keys = join("\n", local.authorized_keys)
    # NOTE(review): assumes cloudinit_config's default rendering (gzip +
    # base64) matches what OCI expects for user_data — confirm.
    user_data = data.cloudinit_config._[each.key].rendered
  }
}
# Node inventory: node1 is the control plane, node2..nodeN are workers.
# Private IPs are assigned deterministically, starting at 10.0.0.11.
locals {
  nodes = {
    for i in range(1, 1 + var.how_many_nodes) : i => {
      node_name  = "node${i}"
      ip_address = "10.0.0.${10 + i}"
      role       = i == 1 ? "controlplane" : "worker"
    }
  }
}

‎network.tf

+38
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
# Virtual cloud network holding the whole cluster.
resource "oci_core_vcn" "_" {
  compartment_id = local.compartment_id
  cidr_block     = "10.0.0.0/16"
}
# Internet gateway giving the nodes public connectivity.
resource "oci_core_internet_gateway" "_" {
  compartment_id = local.compartment_id
  vcn_id         = oci_core_vcn._.id
}
# Route all non-local traffic through the internet gateway.
resource "oci_core_default_route_table" "_" {
  manage_default_resource_id = oci_core_vcn._.default_route_table_id
  route_rules {
    destination       = "0.0.0.0/0"
    destination_type  = "CIDR_BLOCK"
    network_entity_id = oci_core_internet_gateway._.id
  }
}
# SECURITY NOTE: this opens every port, on every protocol, to the whole
# internet (the cluster relies on it for SSH, the API server on 6443,
# and NodePort services). Acceptable for a throwaway lab cluster; do not
# reuse for anything sensitive.
resource "oci_core_default_security_list" "_" {
  manage_default_resource_id = oci_core_vcn._.default_security_list_id
  ingress_security_rules {
    protocol = "all"
    source   = "0.0.0.0/0"
  }
  egress_security_rules {
    protocol    = "all"
    destination = "0.0.0.0/0"
  }
}
# Single /24 subnet; the nodes' fixed IPs (10.0.0.11 and up, see
# local.nodes) live here.
resource "oci_core_subnet" "_" {
  compartment_id    = local.compartment_id
  cidr_block        = "10.0.0.0/24"
  vcn_id            = oci_core_vcn._.id
  route_table_id    = oci_core_default_route_table._.id
  security_list_ids = [oci_core_default_security_list._.id]
}

‎outputs.tf

+8
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
# Ready-to-paste SSH command for logging into the control plane node
# (node1) as the "k8s" user, using the generated private key.
output "ssh" {
  value = "\nssh -i ${local_file.ssh_private_key.filename} -l k8s ${oci_core_instance._[1].public_ip}\n"
}

0 commit comments

Comments
 (0)