kube cloud: add ctlplane to lb when scaling
karmab committed Feb 26, 2024
1 parent cc8db10 commit 886d732
Showing 9 changed files with 162 additions and 111 deletions.
2 changes: 1 addition & 1 deletion kvirt/cluster/k3s/__init__.py
@@ -49,6 +49,7 @@ def scale(config, plandir, cluster, overrides):
else:
data['client'] = config.client
data.update(overrides)
data['scale'] = True
cloud_native = data.get('cloud_native')
cloud_lb = data.get('cloud_lb', provider in cloud_providers and data['ctlplanes'] > 1)
ctlplanes = data['ctlplanes']
@@ -73,7 +74,6 @@ def scale(config, plandir, cluster, overrides):
if arg.startswith('install_k3s'):
install_k3s_args.append(f"{arg.upper()}={data[arg]}")
overrides = data.copy()
overrides['scale'] = True
threaded = data.get('threaded', False) or data.get(f'{role}_threaded', False)
if role == 'ctlplanes':
if ctlplanes == 1:
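The hunk pair above (mirrored in the kubeadm scale() below) moves the scale flag onto the shared data dict right after user overrides are merged, instead of re-adding it to each per-role copy. A minimal sketch of why the two are equivalent, assuming nothing removes the key in between:

    # Illustrative only: dict.copy() carries the flag into every per-role overrides dict.
    data = {'ctlplanes': 3, 'workers': 2}
    data['scale'] = True                 # new location: set once, up front
    for role in ['ctlplanes', 'workers']:
        overrides = data.copy()          # the old code set overrides['scale'] = True here
        assert overrides['scale'] is True

Setting the flag before any template is rendered is what lets ctlplanes.yml (next file) test it.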
3 changes: 3 additions & 0 deletions kvirt/cluster/k3s/ctlplanes.yml
@@ -24,6 +24,9 @@
numcpus: {{ ctlplane_numcpus | default(numcpus, numcpus) }}
memory: {{ ctlplane_memory | default(memory, memory) }}
nets: {{ [network] + extra_networks }}
{% if scale|default(False) and cloud_lb %}
loadbalancer: api.{{ cluster }}
{% endif %}
disks: {{ [disk_size] + extra_disks }}
files: {{ all_files }}
scripts: {{ all_scripts }}
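The new conditional only emits a loadbalancer key when an existing cluster is being scaled and a cloud load balancer is in use, so initial provisioning is untouched; the same three lines are added to the kubeadm and openshift cloud ctlplane templates below. A quick way to see both behaviours, assuming jinja2 is installed (cluster name and flag values are made up):

    # Illustrative rendering of the new template conditional.
    from jinja2 import Template

    snippet = ("{% if scale|default(False) and cloud_lb %}\n"
               "loadbalancer: api.{{ cluster }}\n"
               "{% endif %}")
    # Initial install: scale is unset, so nothing is emitted.
    print(Template(snippet).render(cloud_lb=True, cluster='myk3s'))
    # Scaling with a cloud load balancer: the key appears and the provider acts on it.
    print(Template(snippet).render(scale=True, cloud_lb=True, cluster='myk3s'))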
2 changes: 1 addition & 1 deletion kvirt/cluster/kubeadm/__init__.py
@@ -62,6 +62,7 @@ def scale(config, plandir, cluster, overrides):
data.update(installparam)
plan = installparam.get('plan', plan)
data.update(overrides)
data['scale'] = True
if os.path.exists(clusterdir):
with open(f"{clusterdir}/kcli_parameters.yml", 'w') as paramfile:
yaml.safe_dump(data, paramfile)
@@ -73,7 +74,6 @@ def scale(config, plandir, cluster, overrides):
os.chdir(os.path.expanduser("~/.kcli"))
for role in ['ctlplanes', 'workers']:
overrides = data.copy()
overrides['scale'] = True
if overrides.get(role, 0) == 0:
continue
threaded = data.get('threaded', False) or data.get(f'{role}_threaded', False)
3 changes: 3 additions & 0 deletions kvirt/cluster/kubeadm/ctlplanes.yml
@@ -15,6 +15,9 @@
numcpus: {{ ctlplane_numcpus | default(numcpus, numcpus) }}
memory: {{ ctlplane_memory | default(memory, memory) }}
domain: {{ domain }}
{% if scale|default(False) and cloud_lb %}
loadbalancer: api.{{ cluster }}
{% endif %}
nets: {{ [primary_network] + extra_networks }}
disks: {{ [disk_size] + extra_disks }}
files:
1 change: 1 addition & 0 deletions kvirt/cluster/openshift/__init__.py
@@ -645,6 +645,7 @@ def scale(config, plandir, cluster, overrides):
data.update(installparam)
plan = installparam.get('plan', plan)
data.update(overrides)
data['scale'] = True
if os.path.exists(clusterdir):
with open(f"{clusterdir}/kcli_parameters.yml", 'w') as paramfile:
safe_dump(data, paramfile)
3 changes: 3 additions & 0 deletions kvirt/cluster/openshift/cloud_ctlplanes.yml
@@ -10,6 +10,9 @@

{{ cluster }}-ctlplane-{{ num }}:
domain: {{ domain if config_type != 'azure' else '' }}
{% if scale|default(False) and cloud_lb %}
loadbalancer: api.{{ cluster }}
{% endif %}
image: {{ image }}
pool: {{ pool or config_pool }}
{% if flavor != None %}
230 changes: 123 additions & 107 deletions kvirt/providers/aws/__init__.py

Large diffs are not rendered by default.

12 changes: 10 additions & 2 deletions kvirt/providers/azure/__init__.py
@@ -363,6 +363,13 @@ def create(self, name, virttype=None, profile='', flavor=None, plan='kvirt', cpu
result.wait()
if reservedns and domain is not None:
self.reserve_dns(name, nets=nets, domain=domain, alias=alias, instanceid=name)
if 'loadbalancer' in overrides:
lb = network_client.load_balancers.get(self.resource_group, overrides['loadbalancer'])
backend_id = lb.backend_address_pools[0].id
rule = lb.inbound_nat_rules[0] if lb.inbound_nat_rules else lb.load_balancing_rules[0]
ports = rule.frontend_port_range_start if lb.inbound_nat_rules else rule.frontend_port
self.add_vm_to_loadbalancer(name, backend_id, ports)
self.update_metadata(name, 'loadbalancer', overrides['loadbalancer'], append=True)
return {'result': 'success'}

def start(self, name):
@@ -1317,7 +1324,7 @@ def create_loadbalancer(self, name, ports=[], checkpath='/index.html', vms=[], d
if self.debug:
print(lb)
for index, vm in enumerate(vms):
self.set_loadbalancer(vm, backend_id, ports, backend_id_dual=backend_id_dual)
self.add_vm_to_loadbalancer(vm, backend_id, ports, backend_id_dual=backend_id_dual)
self.update_metadata(vm, 'loadbalancer', name)
if domain is not None:
if not internal:
@@ -1385,7 +1392,8 @@ def list_loadbalancers(self):
results.append([lb.name, ip, protocol, ports, target])
return results

def set_loadbalancer(self, name, backend_id, ports, backend_id_dual=None):
def add_vm_to_loadbalancer(self, vm, backend_id, ports, backend_id_dual=None):
name = vm
try:
vm = self.compute_client.virtual_machines.get(self.resource_group, name)
except:
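Beyond renaming set_loadbalancer to add_vm_to_loadbalancer (so create_loadbalancer and the new create() path share one helper), the Azure change teaches create() to look up the cluster load balancer, take its first backend pool, and derive the frontend port from an inbound NAT rule if one exists, otherwise from the first load-balancing rule. A stubbed sketch of that selection logic, with SimpleNamespace standing in for the Azure SDK models and made-up values:

    # Illustrative only: mirrors the backend/port selection added to create().
    from types import SimpleNamespace

    lb = SimpleNamespace(backend_address_pools=[SimpleNamespace(id='pool-1')],
                         inbound_nat_rules=[],
                         load_balancing_rules=[SimpleNamespace(frontend_port=6443)])
    backend_id = lb.backend_address_pools[0].id
    rule = lb.inbound_nat_rules[0] if lb.inbound_nat_rules else lb.load_balancing_rules[0]
    ports = rule.frontend_port_range_start if lb.inbound_nat_rules else rule.frontend_port
    print(backend_id, ports)   # -> pool-1 6443

With NAT rules present, the start of the NAT frontend port range would be used instead.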
17 changes: 17 additions & 0 deletions kvirt/providers/gcp/__init__.py
@@ -126,6 +126,7 @@ def create(self, name, virttype=None, profile='', flavor=None, plan='kvirt', cpu
region = self.region
if self.exists(name):
return {'result': 'failure', 'reason': f"VM {name} already exists"}
lb = overrides.get('loadbalancer')
kubetype = metadata.get('kubetype')
if flavor is None:
if numcpus != 1 and numcpus % 2 != 0:
@@ -376,6 +377,8 @@ def create(self, name, virttype=None, profile='', flavor=None, plan='kvirt', cpu
tags.extend([kube])
if securitygroups:
tags.extend(securitygroups)
if lb is not None and lb not in tags:
tags.append(lb)
if tags:
body['tags'] = {'items': tags}
newval = {'key': 'serial-port-enable', 'value': 1}
@@ -435,6 +438,9 @@ def create(self, name, virttype=None, profile='', flavor=None, plan='kvirt', cpu
return {'result': 'failure', 'reason': str(e)}
if reservedns and domain is not None:
self.reserve_dns(name, nets=nets, domain=domain, alias=alias)
if lb is not None:
self.update_metadata(name, 'loadbalancer', lb, append=True)
self.add_vm_to_loadbalancer(name, lb)
return {'result': 'success'}

def start(self, name):
@@ -2048,3 +2054,14 @@ def set_router_mode(self, name, mode=True):
return {'result': 'failure', 'reason': msg}
vm['can_ip_forward'] = mode
conn.instances().update(zone=zone, project=project, instance=name, body=vm).execute()

def add_vm_to_loadbalancer(self, vm, lb):
sane_name = lb.replace('.', '-')
conn = self.conn
project = self.project
zone = self.zone
vmpath = f"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances"
instances_body = {"instances": [{"instance": f"{vmpath}/{vm}"}]}
operation = conn.instanceGroups().addInstances(project=project, zone=zone, instanceGroup=sane_name,
body=instances_body).execute()
self._wait_for_operation(operation)
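The new GCP helper assumes the load balancer is backed by an unmanaged instance group named after it (GCP resource names cannot contain dots, hence the replace), so attaching a control plane is just an addInstances call with the VM's self-link; earlier in create(), the load balancer name is also appended to the instance's network tags, presumably so that firewall rules tied to the balancer match the new VM. A self-contained sketch of the request the helper builds, with hypothetical project, zone and names and no API call:

    # Illustrative only: builds the addInstances request body without touching the API.
    def build_add_instances_request(project, zone, lb, vm):
        sane_name = lb.replace('.', '-')
        vmpath = f"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances"
        body = {"instances": [{"instance": f"{vmpath}/{vm}"}]}
        return sane_name, body

    group, body = build_add_instances_request('myproject', 'us-central1-a',
                                              'api.mycluster', 'mycluster-ctlplane-3')
    print(group)   # -> api-mycluster
    print(body)    # -> {'instances': [{'instance': 'https://.../mycluster-ctlplane-3'}]}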
