Commit

Merge pull request #19 from nds-org/develop
Release v1.1
bodom0015 authored Nov 20, 2020
2 parents 09d8661 + 1421c01 commit efc6242
Showing 15 changed files with 435 additions and 28 deletions.
16 changes: 9 additions & 7 deletions 05_provision_master.tf
@@ -34,19 +34,21 @@ resource "null_resource" "provision_master" {
    source      = "assets/bootstrap.sh"
    destination = "/home/ubuntu/bootstrap.sh"
  }
+  provisioner "file" {
+    source      = "assets/bootstrap-master.sh"
+    destination = "/home/ubuntu/bootstrap-master.sh"
+  }
  provisioner "remote-exec" {
    inline = [
      "sleep 60",
      "chmod +x /home/ubuntu/bootstrap.sh",
-      "/home/ubuntu/bootstrap.sh"
+      "/home/ubuntu/bootstrap.sh",
+      "chmod +x /home/ubuntu/bootstrap-master.sh",
+      "/home/ubuntu/bootstrap-master.sh ${var.pod_network_type}"
    ]
  }

-  provisioner "remote-exec" {
-    script = "assets/bootstrap-master.sh"
-  }
-
  provisioner "remote-exec" {
-    inline = ["kubectl label node ${var.env_name}-master external_ip=true"
-    ]
+    inline = ["kubectl label node ${var.env_name}-master external_ip=true"]
  }
}
40 changes: 36 additions & 4 deletions 06_provision_storage.tf
@@ -56,12 +56,12 @@ count = "${var.storage_node_count}"


resource "null_resource" "install_nfs" {
-  depends_on = [
+  depends_on = [
    "null_resource.provision_storage_mounts",
-  ]
+  ]

-  # Don't install if there are no storage nodes in use
-  count = "${var.storage_node_count > 0 ? 1 : 0}"
+  # Only install NFS if there is exactly one storage node in use
+  count = "${(var.storage_node_count == 1) ? 1 : 0}"

  connection {
    user        = "ubuntu"
@@ -86,3 +86,35 @@ count = "${var.storage_node_count > 0 ? 1 : 0}"
    ]
  }
}

resource "null_resource" "install_glfs" {
depends_on = [
"null_resource.provision_storage_mounts",
]

# Don't install if there aren't enough storage nodes in use
count = "${var.storage_node_count > 1 ? 1 : 0}"

connection {
user = "ubuntu"
private_key = "${file("${var.privkey}")}"
host = "${openstack_networking_floatingip_v2.masterip.address}"
}

provisioner "file" {
source = "assets/glfs"
destination = "/home/ubuntu"
}

provisioner "file" {
source = "assets/deploy-glfs.sh"
destination = "/home/ubuntu/deploy-glfs.sh"
}

provisioner "remote-exec" {
inline = [
"chmod +x /home/ubuntu/deploy-glfs.sh",
"/home/ubuntu/deploy-glfs.sh ${var.storage_node_count}"
]
}
}
30 changes: 27 additions & 3 deletions README.md
@@ -32,8 +32,9 @@ on the more specific value domains.
| worker_count | How many workers to provision |
| worker_ips_count | How many of the workers should be assigned an external IP address? |
| docker_volume_size | All nodes will have external block storage attached to use as the docker storage base (/var/lib/docker). Specify the size for these volumes in GBytes |
-| storage_node_count | You can optionally provision nodes to host CEPH shared storage. This needs to be an even number. |
+| storage_node_count | You can optionally provision nodes to host shared storage. 0 => no PVC support, 1 => NFS-backed PVC, 2+ => GlusterFS-backed PVCs [see below](README.md#nfs-provisioner) |
| storage_node_volume_size | Specify the size of the storage attached to each storage node. Expressed in GBytes |
+| pod_network_type | Choose the type of overlay network to use in Kubernetes. Supported options: `flannel` (default), `weave` |
| dns_nameservers | A list of IP addresses of DNS name servers available to the new subnet |
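For orientation, a minimal sketch of how these variables might be set follows. The variable names come from the table above, but every value is an illustrative assumption, not taken from this commit:

```bash
# Hypothetical terraform.tfvars (all values are examples only)
cat > terraform.tfvars <<'EOF'
worker_count             = 3
worker_ips_count         = 1
docker_volume_size       = 50    # GBytes, backs /var/lib/docker
storage_node_count       = 2     # 0 => no PVCs, 1 => NFS, 2+ => GlusterFS
storage_node_volume_size = 100   # GBytes per storage node
pod_network_type         = "weave"
dns_nameservers          = ["8.8.8.8", "8.8.4.4"]
EOF
```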


@@ -78,10 +78,33 @@ on the more specific value domains.
labeled `external_ip=true`.

### NFS Provisioner
-If you configured a storage node, it will be provisioned to run the NFS
-provisioner. This will run a lightweight NFS server in your cluster for
+If you configured a single storage node, it will be provisioned to run the
+[NFS Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs).
+This will run a lightweight NFS server in your Kubernetes cluster for
persistent volume claim support.
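As a quick sanity check after the provisioner deploys, the commands below should show its pod and StorageClass. This is a sketch: kubectl is assumed to be configured on the master, and the exact resource names come from the nfs/ assets, which this diff does not show, so adjust the grep pattern to match your deployment.

```bash
# List the NFS provisioner pod and the registered storage classes.
kubectl get pods --all-namespaces | grep -i nfs
kubectl get storageclass
```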

### GlusterFS Provisioner
If you configured 2 or more storage nodes, they will be provisioned to run the
[GlusterFS Simple Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/gluster/glusterfs).
This will run a distributed, scalable GlusterFS cluster in your Kubernetes
cluster for persistent volume claim support.

NOTE: Many (if not all) GlusterFS volume configurations will require an even
number of storage nodes. We do not support odd-numbered values > 2 for
`storage_node_count`.
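Once deployed, persistent volume claims can target the `glusterfs-simple` StorageClass that `deploy-glfs.sh` (shown later in this diff) generates. A minimal sketch follows; the claim name, access mode, and size are illustrative assumptions:

```bash
# Hypothetical PVC against the glusterfs-simple StorageClass.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-claim        # illustrative name
spec:
  storageClassName: glusterfs-simple
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi           # illustrative size
EOF
```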

### Pod Network Type
Use this setting to choose the type of overlay network your pods use
to communicate with one another.

Supported options:
* [`flannel`](https://kubernetes.io/docs/concepts/cluster-administration/networking/#flannel)
(default): Default network support for basic pod-to-pod communication
* [`weave`](https://kubernetes.io/docs/concepts/cluster-administration/networking/#weave-net-from-weaveworks): Adds security features by supporting
[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
resources that restrict Ingress / Egress between pods (see the example below)
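For example, with `weave` a deny-all ingress policy becomes enforceable. This is a generic Kubernetes sketch (the policy name and scope are illustrative); under `flannel` it would be accepted by the API server but not enforced:

```bash
# Deny all ingress traffic to pods in the current namespace.
kubectl apply -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress   # illustrative name
spec:
  podSelector: {}              # applies to every pod in the namespace
  policyTypes:
    - Ingress
EOF
```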


# Resizing the cluster
Terraform makes this easy. Just adjust the values for the number of worker nodes
and reissue the
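A sketch of that workflow, assuming the standard Terraform commands:

```bash
# Edit worker_count in terraform.tfvars, then re-apply;
# Terraform reconciles the cluster to the new node count.
terraform plan    # preview the nodes to be added or removed
terraform apply   # provision (or tear down) the workers
```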
6 changes: 5 additions & 1 deletion assets/bootstrap-master.sh
@@ -1,6 +1,10 @@
#!/bin/bash
cd ~/kubeadm-bootstrap
-sudo -E ./init-master.bash
+
+echo '============================'
+echo '= Provisioning Master Node ='
+echo '============================'
+sudo -E ./init-master.bash $1

# Enable kubectl bash completion on the master
cat > kubectl << __EOF__
13 changes: 11 additions & 2 deletions assets/bootstrap.sh
@@ -1,7 +1,16 @@
#!/bin/bash
+echo '============================'
+echo '=    Installing kubeadm    ='
+echo '============================'
cd ~
-git clone https://github.com/data-8/kubeadm-bootstrap
+git clone https://github.com/nds-org/kubeadm-bootstrap -b v1.1
cd kubeadm-bootstrap
sudo ./install-kubeadm.bash

-sudo apt-get install -y jq nfs-common
+
+echo '============================'
+echo '= Updating OS Dependencies ='
+echo '============================'
+sudo apt-get update -qq
+sudo apt-get upgrade -qq
+sudo apt-get install -qq jq nfs-common
88 changes: 88 additions & 0 deletions assets/deploy-glfs.sh
@@ -0,0 +1,88 @@
#!/bin/bash
#
# Usage: ./deploy-glfs.sh <number_of_storage_nodes>
#

# DEBUG ONLY: Set this to "echo" to neuter the script and perform a dry-run
DEBUG=""

# The host directory to store brick files
BRICK_HOSTDIR="/tmp"

# Read in the desired number of storage nodes from first arg
NODE_COUNT="$1"

# Ensure that we have enough storage nodes to run GLFS
if [ "$NODE_COUNT" -lt 2 ]; then
  echo "ERROR: Cannot deploy GlusterFS with fewer than 2 nodes"
  exit 1
fi

# Label storage nodes appropriately
STORAGE_NODES=$(kubectl get nodes --no-headers | grep storage | awk '{print $1}')
for node in $STORAGE_NODES; do
  $DEBUG kubectl label nodes $node storagenode=glusterfs
done

# Create the GLFS cluster
$DEBUG kubectl apply -f glfs/glusterfs-daemonset.yaml

# Wait for the GLFS cluster to come up
count="$(kubectl get pods --no-headers | grep glusterfs | grep -v provisioner | awk '{print $3}' | grep Running | wc -l)"
while [ "$count" -lt "$NODE_COUNT" ]; do
  echo "Waiting for GLFS: $count / $NODE_COUNT"
  sleep 5
  count="$(kubectl get pods --no-headers | grep glusterfs | grep -v provisioner | awk '{print $3}' | grep -o Running | wc -l)"
done
echo "GlusterFS is now Running: $count / $NODE_COUNT"

# Retrieve GlusterFS pod IPs
PEER_IPS=$(kubectl get pods -o wide | grep glusterfs | grep -v provisioner | awk '{print $6}')

# Use pod names / IPs to exec in and perform `gluster peer probe`
for pod_ip in ${PEER_IPS}; do
  for peer_ip in ${PEER_IPS}; do
    # Skip each node probing itself
    if [ "$pod_ip" == "$peer_ip" ]; then
      continue;
    fi

    # Perform a gluster peer probe
    pod_name=$(kubectl get pods -o wide | grep $pod_ip | awk '{print $1}')
    $DEBUG kubectl exec -it $pod_name gluster peer probe $peer_ip
  done;
done;

# Dynamically build StorageClass from pod IPs (see below)
BRICK_PATHS=""
for pod_ip in ${PEER_IPS[@]}; do
  # Insert comma if we already started accumulating ips/paths
  if [ "$BRICK_PATHS" != "" ]; then
    BRICK_PATHS="$BRICK_PATHS,"
  fi

  # Build up brickrootPaths one host at a time
  BRICK_PATHS="${BRICK_PATHS}${pod_ip}:${BRICK_HOSTDIR}"
done

# Modify StorageClass to contain our GlusterFS brickrootPaths
echo "---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: glusterfs-simple
provisioner: gluster.org/glusterfs-simple
parameters:
  forceCreate: \"true\"
  volumeType: \"replica 2\"
  brickrootPaths: \"$BRICK_PATHS\"
" > glfs/storageclass.yaml

# Create the storage class
$DEBUG kubectl apply -f glfs/storageclass.yaml

# Bind the necessary ServiceAccount / ClusterRole
$DEBUG kubectl apply -f glfs/rbac.yaml

# Create the GLFS Simple Provisioner
$DEBUG kubectl apply -f glfs/deployment.yaml
10 changes: 7 additions & 3 deletions assets/deploy-nfs.sh
@@ -1,10 +1,14 @@
#!/bin/bash

+echo '============================='
+echo '= Deploying NFS Provisioner ='
+echo '============================='
+
# Create the NFS storage class
-kubectl create -f nfs/storageclass.yaml
+kubectl apply -f nfs/storageclass.yaml

# Deploy RBAC role/binding
-kubectl create -f nfs/rbac.yaml
+kubectl apply -f nfs/rbac.yaml

# Create the NFS provisioner
-kubectl create -f nfs/deployment.yaml
+kubectl apply -f nfs/deployment.yaml
18 changes: 18 additions & 0 deletions assets/glfs/deployment.yaml
@@ -0,0 +1,18 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: glusterfs-simple-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: glusterfs-simple-provisioner
    spec:
      serviceAccount: glfs-provisioner
      containers:
        - image: "quay.io/external_storage/glusterfs-simple-provisioner:latest"
          name: glusterfs-simple-provisioner
101 changes: 101 additions & 0 deletions assets/glfs/glusterfs-daemonset.yaml
@@ -0,0 +1,101 @@
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: glusterfs
  labels:
    glusterfs: daemonset
  annotations:
    description: GlusterFS DaemonSet
    tags: glusterfs
spec:
  template:
    metadata:
      name: glusterfs
      labels:
        glusterfs-node: pod
    spec:
      nodeSelector:
        storagenode: glusterfs
      hostNetwork: true
      containers:
        - image: gluster/gluster-centos:latest
          imagePullPolicy: IfNotPresent
          name: glusterfs
          volumeMounts:
            - name: glusterfs-heketi
              mountPath: "/var/lib/heketi"
            - name: glusterfs-run
              mountPath: "/run"
            - name: glusterfs-lvm
              mountPath: "/run/lvm"
            - name: glusterfs-etc
              mountPath: "/etc/glusterfs"
            - name: glusterfs-logs
              mountPath: "/var/log/glusterfs"
            - name: glusterfs-config
              mountPath: "/var/lib/glusterd"
            - name: glusterfs-dev
              mountPath: "/dev"
            - name: glusterfs-misc
              mountPath: "/var/lib/misc/glusterfsd"
            - name: glusterfs-cgroup
              mountPath: "/sys/fs/cgroup"
              readOnly: true
            - name: glusterfs-ssl
              mountPath: "/etc/ssl"
              readOnly: true
          securityContext:
            capabilities: {}
            privileged: true
          readinessProbe:
            timeoutSeconds: 3
            initialDelaySeconds: 40
            exec:
              command:
                - "/bin/bash"
                - "-c"
                - systemctl status glusterd.service
            periodSeconds: 25
            successThreshold: 1
            failureThreshold: 15
          livenessProbe:
            timeoutSeconds: 3
            initialDelaySeconds: 40
            exec:
              command:
                - "/bin/bash"
                - "-c"
                - systemctl status glusterd.service
            periodSeconds: 25
            successThreshold: 1
            failureThreshold: 15
      volumes:
        - name: glusterfs-heketi
          hostPath:
            path: "/var/lib/heketi"
        - name: glusterfs-run
        - name: glusterfs-lvm
          hostPath:
            path: "/run/lvm"
        - name: glusterfs-etc
          hostPath:
            path: "/etc/glusterfs"
        - name: glusterfs-logs
          hostPath:
            path: "/var/log/glusterfs"
        - name: glusterfs-config
          hostPath:
            path: "/var/lib/glusterd"
        - name: glusterfs-dev
          hostPath:
            path: "/dev"
        - name: glusterfs-misc
          hostPath:
            path: "/var/lib/misc/glusterfsd"
        - name: glusterfs-cgroup
          hostPath:
            path: "/sys/fs/cgroup"
        - name: glusterfs-ssl
          hostPath:
            path: "/etc/ssl"
