Skip to content

Commit

Permalink
Updates load balancer init process to initialize with hosts (#74)
Browse files Browse the repository at this point in the history
* Fixes creating a control plane node without a load balancer

* Init load balancers with masters

* Log masters being set in load balancer

* Pulls the lb config file generation into a tested function
  • Loading branch information
Eagerod authored Feb 22, 2025
1 parent 7159757 commit 8aa074c
Show file tree
Hide file tree
Showing 3 changed files with 57 additions and 16 deletions.
16 changes: 13 additions & 3 deletions cmd/hope/node/init.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,12 @@ var initCmd = &cobra.Command{
// Load balancers have a super lightweight init, so run it before
// fetching some potentially heavier state from the cluster.
if node.IsLoadBalancer() {
return hope.InitLoadBalancer(log.WithFields(log.Fields{}), &node)
masters, err := utils.GetAvailableMasters()
if err != nil {
return err
}

return hope.InitLoadBalancer(log.WithFields(log.Fields{}), &node, &masters)
}

podNetworkCidr := viper.GetString("pod_network_cidr")
Expand All @@ -49,12 +54,17 @@ var initCmd = &cobra.Command{
if err != nil && loadBalancer != (hope.Node{}) {
return err
}

var lbp *hope.Node = nil
if loadBalancer != (hope.Node{}) {
lbp = &loadBalancer
}
loadBalancerHost := viper.GetString("load_balancer_host")

if node.IsMasterAndNode() {
log.Info("Node ", node.Host, " appears to be both master and node. Creating master and removing NoSchedule taint...")

if err := hope.CreateClusterMaster(log.WithFields(log.Fields{}), &node, podNetworkCidr, &loadBalancer, loadBalancerHost, &masters, initCmdForce); err != nil {
if err := hope.CreateClusterMaster(log.WithFields(log.Fields{}), &node, podNetworkCidr, lbp, loadBalancerHost, &masters, initCmdForce); err != nil {
return err
}

Expand All @@ -67,7 +77,7 @@ var initCmd = &cobra.Command{

return hope.TaintNodeByHost(kubectl, &node, "node-role.kubernetes.io/master:NoSchedule-")
} else if node.IsMaster() {
return hope.CreateClusterMaster(log.WithFields(log.Fields{}), &node, podNetworkCidr, &loadBalancer, loadBalancerHost, &masters, initCmdForce)
return hope.CreateClusterMaster(log.WithFields(log.Fields{}), &node, podNetworkCidr, lbp, loadBalancerHost, &masters, initCmdForce)
} else if node.IsNode() {
return hope.CreateClusterNode(log.WithFields(log.Fields{}), &node, &masters, initCmdForce)
} else {
Expand Down
33 changes: 20 additions & 13 deletions pkg/hope/load_balancer.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,31 +17,38 @@ import (

// Just forwards to `SetLoadBalancerHosts`.
// There may be a time where this does more.
func InitLoadBalancer(log *logrus.Entry, node *Node) error {
func InitLoadBalancer(log *logrus.Entry, node *Node, masters *[]Node) error {
log.Debug("Starting to bootstrap a simple NGINX load balancer for API Servers at ", node.Host)
return SetLoadBalancerHosts(log, node, &[]Node{})
return SetLoadBalancerHosts(log, node, masters)
}

func SetLoadBalancerHosts(log *logrus.Entry, node *Node, masters *[]Node) error {
if len(*masters) == 0 {
log.Warn("Setting empty load balancer hosts.")
}

connectionString := node.ConnectionString()

// Build the nginx.conf file contents setting the given nodes as upstreams.
func loadBalancerConfigurationFile(log *logrus.Entry, upstreams *[]Node) string {
// In the case where there are no masters yet, send traffic to a black
// hole.
// Prevents Nginx from crash looping; upstream servers need at least one
// endpoint.
masterUpstreamContents := ""
if len(*masters) == 0 {
if len(*upstreams) == 0 {
masterUpstreamContents = "server 0.0.0.0:6443;"
} else {
for _, master := range *masters {
masterIps := []string{}
for _, master := range *upstreams {
masterUpstreamContents = fmt.Sprintf("%s\n server %s:6443;", masterUpstreamContents, master.Host)
masterIps = append(masterIps, fmt.Sprintf("%s:6443", master.Host))
}
log.Infof("Setting load balancer upstreams to: %s", strings.Join(masterIps, ", "))
}
populatedConfig := fmt.Sprintf(NginxConfig, masterUpstreamContents)
return fmt.Sprintf(NginxConfig, masterUpstreamContents)
}

func SetLoadBalancerHosts(log *logrus.Entry, node *Node, masters *[]Node) error {
if len(*masters) == 0 {
log.Warn("Setting empty load balancer hosts.")
}

connectionString := node.ConnectionString()

populatedConfig := loadBalancerConfigurationFile(log, masters)
configTempFilename := uuid.New().String()
dest := fmt.Sprintf("%s:%s", connectionString, configTempFilename)
if err := scp.ExecSCPBytes([]byte(populatedConfig), dest); err != nil {
Expand Down
24 changes: 24 additions & 0 deletions pkg/hope/load_balancer_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
package hope

import (
	"testing"

	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
)

// TestLoadBalancerConfigurationFileNoMasters verifies that with no masters
// configured, the generated config points the upstream at a black-hole
// address, so Nginx still has at least one endpoint and won't crash loop.
func TestLoadBalancerConfigurationFileNoMasters(t *testing.T) {
	masters := []Node{}
	config := loadBalancerConfigurationFile(log.WithFields(log.Fields{}), &masters)
	assert.Contains(t, config, "0.0.0.0:6443")
}

// TestLoadBalancerConfigurationFileMasters verifies that each configured
// master appears in the generated config as an upstream on port 6443.
func TestLoadBalancerConfigurationFileMasters(t *testing.T) {
	masters := []Node{
		{Host: "192.168.1.254"},
	}
	config := loadBalancerConfigurationFile(log.WithFields(log.Fields{}), &masters)
	assert.Contains(t, config, "192.168.1.254:6443")
}

0 comments on commit 8aa074c

Please sign in to comment.