From 5f36bc0a17034af841e098973c12372be06b2924 Mon Sep 17 00:00:00 2001
From: Antonio Costa
Date: Thu, 10 Aug 2023 16:24:49 +0200
Subject: [PATCH 1/5] fix: when deploying k8s 1.24.3 on RHOS

---
 .../kubernetes/k8s_install_passstore.yml      |  10 +-
 .../playbook/kubernetes/k8s_install_tools.yml |  81 +++++
 .../playbook/ocp/ocp_openstack_install.yml    |  23 +-
 ansible/playbook/openstack/README.adoc        |  60 +---
 ansible/playbook/openstack/check.yml          |  12 +
 ...auth_passstore_v3applicationcredential.yml |  15 +
 .../openstack_auth_passstore_v3password.yml   |  16 +
 ...k_list_objects_v3applicationcredential.yml |  55 ++++
 .../openstack_list_objects_v3password.yml     |  62 ++++
 .../openstack_vm_create_passwordstore.yml     |  79 +----
 ...openstack_vm_create_post_passwordstore.yml |  72 +++++
 .../passstore/passstore_controller_init.yml   |   1 +
 .../passstore_controller_inventory.yml        |  60 ++--
 ansible/roles/containerd/defaults/main.yml    |   1 +
 ansible/roles/containerd/tasks/install.yml    |  45 ++-
 ansible/roles/k8s_cluster/defaults/main.yml   |   7 +-
 ansible/roles/k8s_cluster/tasks/install.yml   |  62 ++--
 .../roles/k8s_cluster/tasks/install_cni.yml   |  62 ++--
 .../k8s_cluster/tasks/install_cni_l124.yml    |  44 +++
 .../templates/calico/custom-resources.yaml.j2 |  27 ++
 .../templates/clusterrole-admin.j2            |   2 +-
 ansible/roles/k8s_dashboard/tasks/install.yml |   7 +-
 .../k8s_issuer_certificate/tasks/install.yml  |   3 +-
 .../k8s_issuer_certificate/tasks/main.yml     |   5 +-
 .../k8s_issuer_certificate/tasks/remove.yml   |   2 +-
 .../templates/godaddy_token_secret.yml.j2     |   3 +-
 .../ansible_inventory/tasks/main.yml          |   1 -
 kubernetes/README.adoc                        | 301 ++++++++++++++++++
 kubernetes/README.md                          | 213 -------------
 openstack/README.adoc                         |   1 -
 passwordstore/README.adoc                     | 107 +++++++
 31 files changed, 992 insertions(+), 447 deletions(-)
 create mode 100644 ansible/playbook/kubernetes/k8s_install_tools.yml
 create mode 100644 ansible/playbook/openstack/check.yml
 create mode 100644 ansible/playbook/openstack/openstack_auth_passstore_v3applicationcredential.yml
 create mode 100644 ansible/playbook/openstack/openstack_auth_passstore_v3password.yml
 create mode 100644 ansible/playbook/openstack/openstack_list_objects_v3applicationcredential.yml
 create mode 100644 ansible/playbook/openstack/openstack_list_objects_v3password.yml
 create mode 100644 ansible/playbook/openstack/openstack_vm_create_post_passwordstore.yml
 create mode 100644 ansible/roles/k8s_cluster/tasks/install_cni_l124.yml
 create mode 100644 ansible/roles/k8s_cluster/templates/calico/custom-resources.yaml.j2
 create mode 100644 kubernetes/README.adoc
 delete mode 100644 kubernetes/README.md
 create mode 100644 passwordstore/README.adoc

diff --git a/ansible/playbook/kubernetes/k8s_install_passstore.yml b/ansible/playbook/kubernetes/k8s_install_passstore.yml
index dd163a98..52ceb83d 100644
--- a/ansible/playbook/kubernetes/k8s_install_passstore.yml
+++ b/ansible/playbook/kubernetes/k8s_install_passstore.yml
@@ -1,9 +1,9 @@
 ---
-- name: "Install Kubernetes"
-  import_playbook: "k8s_install.yml"
+- name: "Install Kubernetes using Passwordstore"
+  ansible.builtin.import_playbook: "k8s_install.yml"
 
 - name: "Get k8s configuration"
-  import_playbook: "k8s_get_config.yml"
+  ansible.builtin.import_playbook: "k8s_get_config.yml"
 
 # Requires:
 #   . k8s_config
@@ -34,4 +34,8 @@
     pass_l3: "{{ vm_name }}"
     var_name: "k8s_dashboard_token"
     var_value: "{{ k8s_dashboard_token }}"
+
+- name: "Install Kubernetes tools"
+  ansible.builtin.import_playbook: "k8s_install_tools.yml"
+
 ...
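For reference, the passwordstore-backed playbooks in this patch all rely on the same lookup pattern from the community.general collection. The sketch below isolates it; the VM name and seed value are hypothetical, and the paths assume the snowdrop/<provider>/<vm_name>/<key> layout implied by the pass_l1/pass_l2/pass_l3 variables above.

[source,yaml]
----
# Minimal sketch of the passwordstore lookup pattern (hypothetical names).
- name: "Passwordstore lookup sketch"
  hosts: localhost
  gather_facts: false
  vars:
    vm_name: my-vm  # hypothetical VM name
  tasks:
    # Read an existing entry; this fails if the entry does not exist.
    - name: "Read the stored dashboard token"
      ansible.builtin.set_fact:
        k8s_dashboard_token: "{{ query('passwordstore', 'openstack/' + vm_name + '/k8s_dashboard_token')[0] }}"

    # create=True inserts the entry when missing; userpass seeds its value.
    - name: "Store a value, creating the entry if needed"
      ansible.builtin.set_fact:
        vm_ip: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host create=True userpass=10.0.0.1')[0] }}"
----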
diff --git a/ansible/playbook/kubernetes/k8s_install_tools.yml b/ansible/playbook/kubernetes/k8s_install_tools.yml
new file mode 100644
index 00000000..b443ca48
--- /dev/null
+++ b/ansible/playbook/kubernetes/k8s_install_tools.yml
@@ -0,0 +1,81 @@
+---
+# Doesn't work, doesn't return api_key and api_secret variables.
+# Don't know why :(
+# - name: "Build GoDaddy authentication, if not provided"
+#   import_playbook: "../godaddy/godaddy_auth_passwordstore.yml"
+#   when: api_key is undefined and api_secret is undefined
+#   tags: [always]
+
+- name: "Install Kubernetes tools"
+  hosts: "{{ vm_name | default('masters') }}"
+  gather_facts: true
+
+  pre_tasks:
+    - name: "Set godaddy auth facts"
+      ansible.builtin.set_fact:
+        api_key: "{{ query('passwordstore', 'godaddy/' ~ api_environment | default('ote') ~ '/api-key')[0] }}"
+        api_secret: "{{ query('passwordstore', 'godaddy/' ~ api_environment | default('ote') ~ '/secret-key')[0] }}"
+      when: api_key is undefined and api_secret is undefined
+      tags: [always]
+
+    - name: "Validate required variables"
+      ansible.builtin.assert:
+        that:
+          - "vm_name is defined and vm_name | length > 0"
+          - "state is defined and (state == 'present' or state == 'absent')"
+          - "api_key is defined and api_secret is defined"
+        fail_msg:
+          - "Required parameters:"
+          - "  vm_name: VM where the tools will be deployed"
+          - "  state: 'present' to install the tools and 'absent' to remove them"
+      tags: [always]
+
+  tasks:
+    # Requires GoDaddy API Key and Secret Key
+    - name: "Install k8s_issuer_certificate"
+      ansible.builtin.import_role:
+        name: k8s_issuer_certificate
+      # vars:
+      #   api_key: "{{ api_key }}"
+      #   api_secret: "{{ api_secret }}"
+      tags: [k8s_issuer_certificate]
+
+# - role: 'cert_manager'
+#   tags: [always,cert_manager]
+# - role: 'k8s_dashboard'
+#   tags: [always,k8s_dashboard]
+# - { role: 'persistence', tags: 'persistence'} # When Minishift or oc cluster up is NOT used
+# - { role: 'identity_provider', tags: 'identity_provider'} # Use HTPasswdPasswordIdentityProvider as Identity Provider -> more secure
+# - { role: 'enable_cluster_role', tags: 'enable_cluster_role'}
+# - { role: 'add_extra_users', tags: 'add_extra_users'}
+# - { role: 'delete_extra_users', tags: 'delete_extra_users'}
+# - { role: 'install_oc', tags: 'install_oc'} # Install oc client when cluster role is not used
+# - { role: 'docker', tags: 'docker'} # Enable to access docker on port 2376
+# - { role: 'create_projects', tags: 'create_projects'}
+# - { role: 'install_nexus', tags: 'nexus'}
+# - { role: 'install_jenkins', tags: 'jenkins'}
+# - { role: 'install_jaeger', tags: 'jaeger'}
+# - { role: 'install_istio', tags: 'istio'}
+# - { role: 'install_launcher', tags: 'install-launcher'}
+# - { role: 'uninstall_launcher', tags: 'uninstall-launcher'}
+# - { role: 'component_crd_operator', tags: 'component_crd_operator'}
+# - { role: 'snowdrop_site', tags: 'snowdrop-site'}
+# - { role: 'tekton_pipelines', tags: 'tekton_pipelines'}
+# - { role: 'halkyon', tags: 'halkyon'}
+# # - { role: 'k8s_cluster', tags: 'k8s_cluster'}
+# - { role: 'k8s_config', tags: 'k8s_config'}
+# # - { role: 'k8s_dashboard', tags: 'k8s_dashboard'}
+# - { role: 'k8s_service_broker', tags: 'k8s_service_broker'}
+# - { role: 'k8s_halkyon_site', tags: 'k8s_halkyon_site'}
+# - { role: 'k8s_issuer_certificate', tags: 'k8s_issuer_certificate'}
+# - { role: 'kind', tags: 'kind'}
+# # - { role: 'ingress', tags: 'ingress'}
+# - { role: 'helm', tags: 'helm'}
+# - { role: 'kubedb', tags: 'kubedb'}
+# - { role: 'docker_registry', tags: 'docker_registry'}
+# - { role: 'tekton_pipelines', tags: 'tekton_pipelines'}
+# - { role: 'component_crd_operator', tags: 'component_crd_operator'}
+# - { role: 'ocp4_console', tags: 'ocp4_console'}
+# # - { role: 'cert_manager', tags: 'cert_manager'}
+
+...
diff --git a/ansible/playbook/ocp/ocp_openstack_install.yml b/ansible/playbook/ocp/ocp_openstack_install.yml
index 161cec9a..3c9363e8 100644
--- a/ansible/playbook/ocp/ocp_openstack_install.yml
+++ b/ansible/playbook/ocp/ocp_openstack_install.yml
@@ -1,18 +1,21 @@
 ---
+- name: "Build OpenStack authentication for v3password"
+  import_playbook: "../openstack/openstack_auth_passstore_v3password.yml"
+
 - name: "Install OCP"
   hosts: localhost
   gather_facts: true
 
-  pre_tasks:
-    - name: "Set openstack_auth facts"
-      set_fact:
-        openstack_auth:
-          openstack_project_name: "{{ query('passwordstore', 'openstack/host/project_name')[0] }}"
-          openstack_console_user: "{{ query('passwordstore', 'openstack/host/console_user')[0] }}"
-          openstack_console_password: "{{ query('passwordstore', 'openstack/host/console_pw')[0] }}"
-          openstack_user_domain: "{{ query('passwordstore', 'openstack/host/console_domain')[0] }}"
-          openstack_project_domain: "{{ query('passwordstore', 'openstack/host/os_domain')[0] }}"
-          openstack_os_auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
+  # pre_tasks:
+  #   - name: "Set openstack_auth facts"
+  #     set_fact:
+  #       openstack_auth:
+  #         openstack_project_name: "{{ query('passwordstore', 'openstack/host/project_name')[0] }}"
+  #         openstack_console_user: "{{ query('passwordstore', 'openstack/host/console_user')[0] }}"
+  #         openstack_console_password: "{{ query('passwordstore', 'openstack/host/console_pw')[0] }}"
+  #         openstack_user_domain: "{{ query('passwordstore', 'openstack/host/console_domain')[0] }}"
+  #         openstack_project_domain: "{{ query('passwordstore', 'openstack/host/os_domain')[0] }}"
+  #         openstack_os_auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
 
   tasks:
     - name: "Deploy OCP"
diff --git a/ansible/playbook/openstack/README.adoc b/ansible/playbook/openstack/README.adoc
index 80d4b311..ba34d853 100644
--- a/ansible/playbook/openstack/README.adoc
+++ b/ansible/playbook/openstack/README.adoc
@@ -26,64 +26,14 @@
 
 NOTE: The list of flavors is identified on the link:../../../openstack/README.adoc[]
 
 == Playbooks
 
 === Create a VM
 
 Create OpenStack instance based on passwordstore
 
 .openstack_vm_create_passwordstore parameters
 [cols="2,5"]
 |===
-| Parameter | Description
-
-| `openstack.vm.flavor`
-
-[.fuchsia]#string#
-
-[.red]#required#
-
-a| OpenStack VM flavor (size)
-
-_e.g._ `m1.medium`.
-
-| `openstack.vm.image`
-
-[.fuchsia]#string#
-
-[.red]#required#
-
-a| OpenStack VM image
-
-_e.g._ `Fedora-Cloud-Base-35`.
-
-| `openstack.vm.network`
-
-[.fuchsia]#string#
-
-[.red]#required#
-
-a| Value for the OpenStack provider network.
-
-_e.g._ `provider_net_shared`
-
-// | k8s_type
-
-// [.fuchsia]#string#
-
-// a| *for k8s hosts.*
-
-// Kubernetes host type [master,worker].
-
-// | k8s_version
-
-// [.fuchsia]#string#
-
-// a| *for k8s hosts.*
-
-// Kubernetes version to be associated with the host, e.g. for version `1.23` use `123`.
-This is actually an Ansible Inventory group having definitions associated with each of the Kubernetes version.
-
-// | key_name
-
-// [.fuchsia]#string#
+|Field name |Mandatory |Description
 
 // a| Use an existing SSH key (value) instead of creating one for the VM.
 
@@ -101,12 +51,12 @@ This name will be used both as hostname as well as Ansible Inventory name.
 
 [source,bash]
 ----
-$ VM_NAME=vm20230627-t01
+$ VM_NAME=vm20210221-t01
 ----
 
 [source,bash]
 ----
-$ ansible-playbook ansible/playbook/openstack/openstack_vm_create_passwordstore.yml -e '{"openstack": {"vm": {"network": "provider_net_shared","image": "Fedora-Cloud-Base-35", "flavor": "m1.medium"}}}' -e vm_name=${VM_NAME}
+$ ansible-playbook playbook/openstack/openstack_vm_create_passwordstore.yml -e k8s_type=masters -e k8s_version=123 -e '{"openstack": {"vm": {"network": "provider_net_shared","image": "Fedora-Cloud-Base-35", "flavor": "m1.medium"}}}' -e key_name=test-adm-key -e vm_name=${VM_NAME}
 ----
 
 Although some failures might occur, they can be ignored and shouldn't affect the process. This playbook should finish with no failed tasks.
diff --git a/ansible/playbook/openstack/check.yml b/ansible/playbook/openstack/check.yml
new file mode 100644
index 00000000..74262421
--- /dev/null
+++ b/ansible/playbook/openstack/check.yml
@@ -0,0 +1,12 @@
+---
+- name: "Check OpenStack"
+  set_fact:
+    pass_provider: "openstack"
+  when: "pass_provider is not defined"
+
+- name: "Set OpenStack default variables"
+  set_fact:
+    use_generic_ssh_key: True
+    generic_ssh_key_name: generic-key
+  when: "use_generic_ssh_key is not defined or (use_generic_ssh_key | bool)"
+...
diff --git a/ansible/playbook/openstack/openstack_auth_passstore_v3applicationcredential.yml b/ansible/playbook/openstack/openstack_auth_passstore_v3applicationcredential.yml
new file mode 100644
index 00000000..39af40c9
--- /dev/null
+++ b/ansible/playbook/openstack/openstack_auth_passstore_v3applicationcredential.yml
@@ -0,0 +1,15 @@
+---
+- name: "OpenStack authentication with passwordstore and v3applicationcredential"
+  hosts: localhost
+
+  tasks:
+
+    - name: "Set facts"
+      ansible.builtin.set_fact:
+        rhos_authentication_type: v3applicationcredential
+        rhos_authentication:
+          auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
+          application_credential_id: "{{ query('passwordstore', 'openstack/host/app_cred_id')[0] }}"
+          application_credential_secret: "{{ query('passwordstore', 'openstack/host/app_cred_secret')[0] }}"
+
+...
diff --git a/ansible/playbook/openstack/openstack_auth_passstore_v3password.yml b/ansible/playbook/openstack/openstack_auth_passstore_v3password.yml
new file mode 100644
index 00000000..896902a2
--- /dev/null
+++ b/ansible/playbook/openstack/openstack_auth_passstore_v3password.yml
@@ -0,0 +1,16 @@
+---
+- name: "OpenStack authentication with passwordstore and v3password"
+  hosts: localhost
+  gather_facts: false
+
+  tasks:
+    - name: "Set openstack_auth facts"
+      set_fact:
+        openstack_auth:
+          openstack_project_name: "{{ query('passwordstore', 'openstack/host/project_name')[0] }}"
+          openstack_console_user: "{{ query('passwordstore', 'openstack/host/console_user')[0] }}"
+          openstack_console_password: "{{ query('passwordstore', 'openstack/host/console_pw')[0] }}"
+          openstack_user_domain: "{{ query('passwordstore', 'openstack/host/console_domain')[0] }}"
+          openstack_project_domain: "{{ query('passwordstore', 'openstack/host/os_domain')[0] }}"
+          openstack_os_auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
+...
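The two authentication shapes assembled above map onto standard OpenStack client configuration. As a hedged reference, the clouds.yaml sketch below shows roughly equivalent entries with placeholder values; it is not part of the patch, but it is handy for reproducing the same calls with the openstack CLI.

[source,yaml]
----
# Hypothetical clouds.yaml equivalents of the two auth types (placeholder values).
clouds:
  rhos-password:
    auth_type: v3password
    auth:
      auth_url: https://rhos.example.com:13000
      username: console-user
      password: console-password
      project_name: my-project
      user_domain_name: example.org
      project_domain_name: example.org
  rhos-appcred:
    auth_type: v3applicationcredential
    auth:
      auth_url: https://rhos.example.com:13000
      application_credential_id: "0123456789abcdef"
      application_credential_secret: "s3cr3t"
----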
diff --git a/ansible/playbook/openstack/openstack_list_objects_v3applicationcredential.yml b/ansible/playbook/openstack/openstack_list_objects_v3applicationcredential.yml
new file mode 100644
index 00000000..e7248441
--- /dev/null
+++ b/ansible/playbook/openstack/openstack_list_objects_v3applicationcredential.yml
@@ -0,0 +1,55 @@
+---
+- name: "Instantiate RHOS authentication"
+  ansible.builtin.import_playbook: "openstack_auth_passstore_v3applicationcredential.yml"
+  vars:
+    vm_user: "snowdrop"
+    pass_provider: "openstack"
+
+- name: "OpenStack Authentication"
+  hosts: localhost
+
+  tasks:
+
+    - name: "Get auth_token"
+      openstack.cloud.auth:
+        auth_type: "{{ rhos_authentication_type }}"
+        auth: "{{ rhos_authentication }}"
+      register: auth_result
+
+    - name: "Print Openstack Authentication result"
+      ansible.builtin.debug:
+        msg: "auth_result: {{ auth_result }}"
+        verbosity: 0
+
+    - name: List Fedora images
+      openstack.cloud.image_info:
+        auth_type: "{{ rhos_authentication_type }}"
+        auth: "{{ rhos_authentication }}"
+        properties:
+          os_distro: "fedora"
+      register: image_info_result
+
+    - name: "Print Openstack output"
+      ansible.builtin.debug:
+        var: image_info_result
+
+    # https://docs.openstack.org/ocata/cli-reference/glance-property-keys.html
+    - name: List Fedora images
+      openstack.cloud.image_info:
+        # token, v2token, v3token, admin_token
+        auth_type: token
+        auth:
+          auth_url: "https://rhos-d.infra.prod.upshift.rdu2.redhat.com:13000"
+          # token: "{{ auth_result.ansible_facts['auth_token'] }}"
+          token: "{{ auth_result.auth_token }}"
+          interface: "internal"
+        properties:
+          os_distro: "fedora"
+      register: image_info_result
+
+    - name: "Print Openstack output"
+      ansible.builtin.debug:
+        var: image_info_result
+...
+# ansible-playbook ansible/playbook/openstack/openstack_list_objects_v3applicationcredential.yml
diff --git a/ansible/playbook/openstack/openstack_list_objects_v3password.yml b/ansible/playbook/openstack/openstack_list_objects_v3password.yml
new file mode 100644
index 00000000..4af6d2a6
--- /dev/null
+++ b/ansible/playbook/openstack/openstack_list_objects_v3password.yml
@@ -0,0 +1,62 @@
+---
+- name: "Instantiate RHOS authentication"
+  ansible.builtin.import_playbook: "openstack_auth_passstore_v3password.yml"
+  vars:
+    vm_user: "snowdrop"
+    pass_provider: "openstack"
+
+- name: "OpenStack Authentication"
+  hosts: localhost
+  gather_facts: false
+
+  tasks:
+
+    - name: "Print Openstack output"
+      ansible.builtin.debug:
+        var: image_info_result
+
+    - name: List all images
+      openstack.cloud.image_info:
+        auth:
+          project_name: "{{ openstack_auth.openstack_project_name }}"
+          username: "{{ openstack_auth.openstack_console_user }}"
+          password: "{{ openstack_auth.openstack_console_password }}"
+          user_domain_name: "{{ openstack_auth.openstack_user_domain }}"
+          project_domain_name: "{{ openstack_auth.openstack_project_domain }}"
+          auth_url: "{{ openstack_auth.openstack_os_auth_url }}"
+      register: rhos_image_list_for_print
+
+    - name: Print RHOS images
+      ansible.builtin.debug:
+        var: rhos_image_list_for_print
+
+    - name: List all flavors
+      openstack.cloud.compute_flavor_info:
+        auth:
+          project_name: "{{ openstack_auth.openstack_project_name }}"
+          username: "{{ openstack_auth.openstack_console_user }}"
+          password: "{{ openstack_auth.openstack_console_password }}"
+          user_domain_name: "{{ openstack_auth.openstack_user_domain }}"
+          project_domain_name: "{{ openstack_auth.openstack_project_domain }}"
+          auth_url: "{{ openstack_auth.openstack_os_auth_url }}"
+      register: rhos_flavor_list_for_print
+
+    - name: Print RHOS flavors
+      ansible.builtin.debug:
+        var: rhos_flavor_list_for_print
+
+    - name: List all networks
+      openstack.cloud.networks_info:
+        auth:
+          project_name: "{{ openstack_auth.openstack_project_name }}"
+          username: "{{ openstack_auth.openstack_console_user }}"
+          password: "{{ openstack_auth.openstack_console_password }}"
+          user_domain_name: "{{ openstack_auth.openstack_user_domain }}"
+          project_domain_name: "{{ openstack_auth.openstack_project_domain }}"
+          auth_url: "{{ openstack_auth.openstack_os_auth_url }}"
+      register: rhos_network_list
+
+    - name: Print RHOS networks
+      ansible.builtin.debug:
+        var: rhos_network_list
+...
diff --git a/ansible/playbook/openstack/openstack_vm_create_passwordstore.yml b/ansible/playbook/openstack/openstack_vm_create_passwordstore.yml
index a3ae0b91..04fd49e0 100644
--- a/ansible/playbook/openstack/openstack_vm_create_passwordstore.yml
+++ b/ansible/playbook/openstack/openstack_vm_create_passwordstore.yml
@@ -10,37 +10,34 @@
 #   . k8s_version: Kubernetes version [117 ... 121], empty for no k8s installation
 
 - name: "Validate passwordstore"
-  import_playbook: "../passstore/passstore_controller_check.yml"
+  ansible.builtin.import_playbook: "../passstore/passstore_controller_check.yml"
 
 # tag::initialize_passwordstore_inventory[]
 - name: "Initialize passwordstore inventory"
-  import_playbook: "../passstore/passstore_controller_inventory.yml"
+  ansible.builtin.import_playbook: "../passstore/passstore_controller_inventory.yml"
   vars:
     vm_user: "snowdrop"
     pass_provider: "openstack"
 # end::initialize_passwordstore_inventory[]
 
+# tag::instanciate_rhos_auth[]
+- name: "Instantiate RHOS authentication"
+  ansible.builtin.import_playbook: "openstack_auth_passstore_v3password.yml"
+  vars:
+    vm_user: "snowdrop"
+    pass_provider: "openstack"
+# end::instanciate_rhos_auth[]
+
 - name: "Openstack VM create"
   hosts: localhost
   gather_facts: True
-
-  pre_tasks:
-    - name: "Set openstack_auth facts"
-      ansible.builtin.set_fact:
-        openstack_auth:
-          openstack_project_name: "{{ query('passwordstore', 'openstack/host/project_name')[0] }}"
-          openstack_console_user: "{{ query('passwordstore', 'openstack/host/console_user')[0] }}"
-          openstack_console_password: "{{ query('passwordstore', 'openstack/host/console_pw')[0] }}"
-          openstack_user_domain: "{{ query('passwordstore', 'openstack/host/console_domain')[0] }}"
-          openstack_project_domain: "{{ query('passwordstore', 'openstack/host/os_domain')[0] }}"
-          openstack_os_auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
 
   tasks:
     # outputs:
     #   . openstack_vm_ipv4
    #   . openstack_output
    - name: "Execute create inventory, if tagged as so"
-      include_role:
+      ansible.builtin.include_role:
         name: "snowdrop.cloud_infra.openstack_vm"
         apply:
           tags:
@@ -53,55 +51,6 @@
       set_fact:
         openstack_vm_ipv4: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host create=True userpass=' + openstack_vm_ipv4 )[0] }}"
 
-- name: "Refresh inventory"
-  hosts: localhost
-  gather_facts: True
-
-  tasks:
-    - name: Refresh the inventory so the newly added host is available
-      meta: refresh_inventory
-
-    - name: "Add host to known hosts {{ hostvars[vm_name]['ansible_ssh_host'] }}"
-      ansible.builtin.known_hosts:
-        name: "{{ hostvars[vm_name]['ansible_ssh_host'] }}"
-        key: "{{ lookup('pipe', 'ssh-keyscan {{ hostvars[vm_name].ansible_ssh_host }}') }}"
-        hash_host: true
-
-- name: "Wait for the VM to boot and we can ssh"
-  hosts: "{{ vm_name }}"
-  gather_facts: no
-
-  tasks:
-    - name: "Show 'Wait for connection to host' output"
-      debug:
-        msg:
-          - "ip   : {{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host')[0] }}"
-          - "port : {{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_port')[0] }}"
-
-    - name: "Wait for connection to host"
-      ansible.builtin.wait_for:
-        host: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host')[0] }}"
-        port: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_port')[0] }}"
-        timeout: 120
-      register: wait_for_connection_reg
-
-  post_tasks:
-    - name: "DON'T FORGET TO SECURE YOUR SERVER"
-      debug:
-        msg: "Trying to start start server securization automatically For manual execution: $ ansible-playbook ansible/playbook/sec_host.yml -e vm_name={{ vm_name }} -e provider=openstack"
-
-- name: "Openstack VM init"
-  hosts: "{{ vm_name }}"
-  gather_facts: yes
-
-  roles:
-    - role: "openstack/init_vm"
-
-- name: "Secure new server"
-  import_playbook: "../sec_host.yml"
-  vars:
-    provider: "openstack"
-    hosts: "{{ vm_name }}"
-    vm_name: "{{ vm_name }}"
-  tags: [always]
+- name: "Execute post create actions"
+  ansible.builtin.import_playbook: "openstack_vm_create_post_passwordstore.yml"
 ...
diff --git a/ansible/playbook/openstack/openstack_vm_create_post_passwordstore.yml b/ansible/playbook/openstack/openstack_vm_create_post_passwordstore.yml
new file mode 100644
index 00000000..b9f4c00c
--- /dev/null
+++ b/ansible/playbook/openstack/openstack_vm_create_post_passwordstore.yml
@@ -0,0 +1,72 @@
+---
+# Variables:
+# - "vm_name": "n311-test"
+
+# tag::instanciate_rhos_auth[]
+- name: "Instantiate RHOS authentication"
+  ansible.builtin.import_playbook: "openstack_auth_passstore_v3password.yml"
+  vars:
+    vm_user: "snowdrop"
+    pass_provider: "openstack"
+# end::instanciate_rhos_auth[]
+
+- name: "Refresh inventory"
+  hosts: localhost
+  gather_facts: True
+
+  tasks:
+    - name: Refresh the inventory so the newly added host is available
+      meta: refresh_inventory
+
+    - name: "Post create actions"
+      ansible.builtin.debug:
+        msg:
+          - "Next step is adding the host to the controller known hosts."
+ - "If it fails, because the VM is not started yet, it can be executed again manualy using the following command:" + - "" + - "ansible-playbook ansible/playbook/openstack/openstack_vm_create_post_passwordstore.yml -e vm_name={{ vm_name }}" + + - name: "Add host to known hosts {{ hostvars[vm_name]['ansible_ssh_host'] }}" + ansible.builtin.known_hosts: + name: "{{ hostvars[vm_name]['ansible_ssh_host'] }}" + key: "{{ lookup('pipe', 'ssh-keyscan {{ hostvars[vm_name].ansible_ssh_host }}') }}" + hash_host: true + +- name: "Wait for the VM to boot and we can ssh" + hosts: "{{ vm_name }}" + gather_facts: no + + tasks: + - name: "Show 'Wait for connection to host' output" + debug: + msg: + - "ip : {{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host')[0] }}" + - "port : {{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_port')[0] }}" + + - name: "Wait for connection to host" + ansible.builtin.wait_for: + host: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host')[0] }}" + port: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_port')[0] }}" + timeout: 120 + register: wait_for_connection_reg + + post_tasks: + - name: "DON'T FORGET TO SECURE YOUR SERVER" + debug: + msg: "Trying to start start server securization automatically For manual execution: $ ansible-playbook ansible/playbook/sec_host.yml -e vm_name={{ vm_name }} -e provider=openstack" + +- name: "Openstack VM init" + hosts: "{{ vm_name }}" + gather_facts: yes + + roles: + - role: "openstack/init_vm" + +- name: "Secure new server" + import_playbook: "../sec_host.yml" + vars: + provider: "openstack" + hosts: "{{ vm_name }}" + vm_name: "{{ vm_name }}" + tags: [always] +... diff --git a/ansible/playbook/passstore/passstore_controller_init.yml b/ansible/playbook/passstore/passstore_controller_init.yml index b245ef30..7df0483f 100644 --- a/ansible/playbook/passstore/passstore_controller_init.yml +++ b/ansible/playbook/passstore/passstore_controller_init.yml @@ -15,6 +15,7 @@ assert: that: - "pass_provider is defined" + - "pass_provider == 'hetzner' or pass_provider == 'openstack'" fail_msg: - "Required parameters:" - " pass_provider: provider in the passstore project [hetzner,openstack]" diff --git a/ansible/playbook/passstore/passstore_controller_inventory.yml b/ansible/playbook/passstore/passstore_controller_inventory.yml index a1d4f9cf..4e6d3b65 100644 --- a/ansible/playbook/passstore/passstore_controller_inventory.yml +++ b/ansible/playbook/passstore/passstore_controller_inventory.yml @@ -2,51 +2,55 @@ # Required variables: # . vm_name: Name of the vm # . pass_provider: provider in the passstore project [hetzner] -# . k8s_type: Kubernetes host type [masters,nodes], empty for no k8s installation -# . k8s_version: Kubernetes version [117 ... 
121], empty for no k8s installation - name: "Generate inventory files on the controller" hosts: localhost gather_facts: no pre_tasks: - # - name: "Validate required variables" - # assert: - # that: - # - "vm_name is defined" - # - "pass_provider is defined" - # - "k8s_type is defined" - # - "k8s_version is defined" - # fail_msg: - # - "Required parameters:" - # - " vm_name: Name of the vm" - # - " pass_provider: provider in the passstore project [hetzner]" - # - " k8s_type: Kubernetes host type [masters,nodes], empty for no k8s installation" - # - " k8s_version: Kubernetes version [115,116], empty for no k8s installation" + - name: "Validate required variables" + assert: + that: + - "vm_name is defined" + - "pass_provider is defined" + - "pass_provider == 'hetzner' or pass_provider == 'openstack'" + fail_msg: + - "Required parameters:" + - " vm_name: Name of the vm" + - " pass_provider: provider in the passstore project [hetzner,openstack]" - name: "Pull pass git database" shell: "git pull" args: chdir: "{{ lookup('env', 'PASSWORD_STORE_DIR') }}" + - name: "Print operation" + ansible.builtin.debug: + var: operation + tasks: - - name: "Create inventory" - include_role: - name: "passstore/ansible_inventory" - vars: - pass_l1: "{{ pass_db_name | default('snowdrop') }}" - pass_l2: "{{ pass_provider | default('hetzner') }}" - pass_l3: "{{ vm_name }}" - operation: "create" - when: "operation is defined and operation == 'create' " - - name: "Build inventory" - include_role: + - name: "Print operation 2" + ansible.builtin.debug: + var: operation + + - name: "Prepare inventory" + ansible.builtin.include_role: name: "passstore/ansible_inventory" vars: pass_l1: "{{ pass_db_name | default('snowdrop') }}" - pass_l2: "{{ pass_provider | default('hetzner') }}" + pass_l2: "{{ pass_provider | default('openstack') }}" pass_l3: "{{ vm_name }}" - when: "operation is undefined or operation != 'create'" + # operation: "create" + # when: "operation is defined and operation == 'create' " + + # - name: "Build inventory" + # include_role: + # name: "passstore/ansible_inventory" + # vars: + # pass_l1: "{{ pass_db_name | default('snowdrop') }}" + # pass_l2: "{{ pass_provider | default('hetzner') }}" + # pass_l3: "{{ vm_name }}" + # when: "operation is undefined or operation != 'create'" - name: "Add to k8s version" include_role: diff --git a/ansible/roles/containerd/defaults/main.yml b/ansible/roles/containerd/defaults/main.yml index e69de29b..ba6d55e0 100644 --- a/ansible/roles/containerd/defaults/main.yml +++ b/ansible/roles/containerd/defaults/main.yml @@ -0,0 +1 @@ +cni_plugins_version: v1.3.0 diff --git a/ansible/roles/containerd/tasks/install.yml b/ansible/roles/containerd/tasks/install.yml index 9327a441..97ceb7aa 100644 --- a/ansible/roles/containerd/tasks/install.yml +++ b/ansible/roles/containerd/tasks/install.yml @@ -3,21 +3,21 @@ template: src: "containerd.conf.j2" dest: "/etc/modules-load.d/containerd.conf" - become: yes + become: true - name: "modprobe overlay" shell: "modprobe overlay" - become: yes + become: true - name: "modprobe br_netfilter" shell: "modprobe br_netfilter" - become: yes + become: true - name: Install dependencies needed by containerd package: name: "{{ containerd_dependencies }}" state: present - become: yes + become: true - name: "Add docker repository" include_role: @@ -28,17 +28,17 @@ package: name: "{{ containerd_packages }}" state: present - become: yes + become: true - name: Ensures /etc/containerd dir exists file: path: "/etc/containerd" state: "directory" - become: yes + 
become: true
 
 - name: "Configure containerd"
   shell: "containerd config default > /etc/containerd/config.toml"
-  become: yes
+  become: true
 
 - name: "Set root folder"
   lineinfile:
@@ -46,14 +46,29 @@
     regexp: '^root = "/var/lib/containerd"'
     line: 'root = "{{ containerd_root_dir }}"'
   when: "containerd_root_dir is defined"
-  become: yes
+  become: true
 
-- name: "Set cni folder"
-  lineinfile:
-    path: "/etc/containerd/config.toml"
-    regexp: '^      bin_dir = "/opt/cni/bin"'
-    line: '      bin_dir = "/usr/libexec/cni"'
-  become: yes
+# - name: "Set cni folder"
+#   lineinfile:
+#     path: "/etc/containerd/config.toml"
+#     regexp: '^      bin_dir = "/opt/cni/bin"'
+#     line: '      bin_dir = "/usr/libexec/cni"'
+#   become: true
+
+- name: Create CNI plugin folder
+  ansible.builtin.file:
+    mode: 0755
+    path: /opt/cni/bin
+    state: directory
+  become: true
+
+# Download the CNI plugins release and extract the binaries into /opt/cni/bin.
+- name: Get CNI plugin for containerd
+  ansible.builtin.unarchive:
+    src: "https://github.com/containernetworking/plugins/releases/download/{{ cni_plugins_version }}/cni-plugins-linux-amd64-{{ cni_plugins_version }}.tgz"
+    dest: /opt/cni/bin
+    remote_src: true
+    mode: 0755
+  become: true
 
 - name: Start containerd
   systemd:
@@ -61,6 +76,6 @@
     name: containerd
     enabled: yes
     daemon_reload: yes
-  become: yes
+  become: true
 ...
diff --git a/ansible/roles/k8s_cluster/defaults/main.yml b/ansible/roles/k8s_cluster/defaults/main.yml
index 19cc6571..0ed85dff 100644
--- a/ansible/roles/k8s_cluster/defaults/main.yml
+++ b/ansible/roles/k8s_cluster/defaults/main.yml
@@ -5,8 +5,8 @@
 remove: false
 remote: true
 ingress_host: "{{ ansible_ssh_host }}.nip.io"
 
-install_docker: false
-install_containerd: true
+# install_docker: false
+# install_containerd: true
 
 firewalld_public_ports:
   - 6443/tcp
@@ -24,3 +24,6 @@
 pod_subnet: 10.244.0.0/16
 service_subnet: 10.96.0.0/12
 
 coreos_flannel_sha_commit: 2140ac876ef134e0ed5af15c65e414cf26827915
+
+calico_version: v3.26.1
+cp_name: control-plane
diff --git a/ansible/roles/k8s_cluster/tasks/install.yml b/ansible/roles/k8s_cluster/tasks/install.yml
index c7c6f6a7..674f617f 100644
--- a/ansible/roles/k8s_cluster/tasks/install.yml
+++ b/ansible/roles/k8s_cluster/tasks/install.yml
@@ -18,7 +18,7 @@
     permanent: yes
     state: enabled
   notify: Restart firewalld
-  become: yes
+  become: true
 
 - name: Add source network to trusted sources
   firewalld:
@@ -30,7 +30,7 @@
     - "{{ pod_subnet }}"
    - "{{ service_subnet }}"
   notify: Restart firewalld
-  become: yes
+  become: true
 
 - name: Open k8s known ports
   firewalld:
@@ -40,7 +40,7 @@
     state: enabled
   loop: "{{ firewalld_public_ports }}"
   notify: Restart firewalld
-  become: yes
+  become: true
 
 - name: Open k8s known services
   firewalld:
@@ -50,7 +50,7 @@
     state: enabled
   loop: "{{ firewalld_public_services }}"
   notify: Restart firewalld
-  become: yes
+  become: true
 
 - name: "Flush handlers"
   meta: flush_handlers
@@ -76,7 +76,7 @@
     gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
     exclude: kube*
     state: present
-  become: yes
+  become: true
   register: k8s_add_repo_res
   failed_when: "k8s_add_repo_res.rc is defined and k8s_add_repo_res.rc > 0 and k8s_add_repo_res.stderr is defined and 'signature could not be verified for kubernetes' not in k8s_add_repo_res.stderr"
 
@@ -90,7 +90,7 @@
     gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
     exclude: kube*
     state: present
-  become: yes
+  become: true
   register: k8s_add_repo_alt_res
  when: "k8s_add_repo_res is not defined or (k8s_add_repo_res.rc is defined and k8s_add_repo_res.rc > 0 and
k8s_add_repo_res.stderr is defined and 'signature could not be verified for kubernetes' in k8s_add_repo_res.stderr)" @@ -104,14 +104,14 @@ - kubectl-{{ k8s_version }} - kubeadm-{{ k8s_version }} - kubelet-{{ k8s_version }} - become: yes + become: true - name: Disable selinux shell: | setenforce 0 sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config getenforce - become: yes + become: true - name: Start kubelet systemd: @@ -119,34 +119,52 @@ name: kubelet daemon_reload: yes enabled: yes - become: yes + become: true - name: Create k8s conf file template: src: "k8s.conf.j2" dest: /etc/sysctl.d/k8s.conf - become: yes + register: k8s_conf_file + become: true - name: Make k8s conf file take effect command: "sysctl --system" - become: yes + become: true + when: k8s_conf_file.changed + +- name: Remove CRI plugin disable from containerd toml + ansible.builtin.replace: + path: /etc/containerd/config.toml + # regexp: '^disabled_plugins\ =\ \["cri"\]' + # replace: '#disabled_plugins = ["cri"]' + regexp: '^(disabled_plugins)(.*)("cri")(.*)' + replace: '\1\2\4' + register: enable_cri_plugin + become: true + +- name: Restart containerd + ansible.builtin.service: + name: containerd + state: restarted + become: true + when: enable_cri_plugin.changed - name: Pull k8s linux images for version {{ k8s_version }} command: "kubeadm config images pull --kubernetes-version {{ k8s_version }}" - become: yes + become: true - name: "Checks k8s bootstrap status" stat: path: "/etc/kubernetes/admin.conf" register: k8s_config_file - - name: "Initialize cluster for k8s - {{ k8s_version }}" command: "kubeadm init --pod-network-cidr={{ pod_subnet }} --apiserver-advertise-address={{ api_server }} --kubernetes-version {{ k8s_version }}" register: kubeadm_init_res changed_when: "kubeadm_init_res.rc == 0" failed_when: "kubeadm_init_res.rc != 0 and 'already exists' not in kubeadm_init_res.stderr" - become: yes + become: true when: "not k8s_config_file.stat.exists" - name: "Checks k8s bootstrap status" @@ -161,12 +179,17 @@ state: "present" when: "not home_k8s_config_file.stat.exists" -- name: "Install CNI" - include_tasks: install_cni.yml +- name: "Install CNI < 1.24" + ansible.builtin.include_tasks: install_cni_l124.yml + when: k8s_version is version('1.24.0', '<') + +- name: "Install CNI >= 1.24" + ansible.builtin.include_tasks: install_cni.yml + when: k8s_version is version('1.24.0', '>=') - name: "Check if nodes are already tainted" shell: | - kubectl get nodes -o jsonpath="{.items[].spec.taints[?(@.key=='node-role.kubernetes.io/master')]}" + kubectl get nodes -o jsonpath="{.items[].spec.taints[?(@.key=='node-role.kubernetes.io/{{ cp_name }}')]}" register: kc_check_tainted_nodes ignore_errors: True @@ -175,8 +198,9 @@ msg: "kc_check_tainted_nodes: {{ kc_check_tainted_nodes }}" when: "kc_check_tainted_nodes is defined" -- name: Taint the node - command: "kubectl taint nodes --all node-role.kubernetes.io/master-" +- name: Taint the nodes + command: | + kubectl taint nodes --all node-role.kubernetes.io/{{ cp_name }}- register: kc_taint_node_res failed_when: "kc_taint_node_res.rc != 0 and 'not found' not in kc_taint_node_res.stderr" diff --git a/ansible/roles/k8s_cluster/tasks/install_cni.yml b/ansible/roles/k8s_cluster/tasks/install_cni.yml index 9e2f8192..e78845d4 100644 --- a/ansible/roles/k8s_cluster/tasks/install_cni.yml +++ b/ansible/roles/k8s_cluster/tasks/install_cni.yml @@ -1,44 +1,50 @@ --- -- name: "Define CNI installation method" - set_fact: - cni_new: "{{ ansible_distribution == 'Fedora' or 
((ansible_distribution == 'CentOS' or ansible_os_family == 'RedHat') and ansible_distribution_major_version != '7') }}"
-
-- name: "Install Flannel Virtual Network for pod communication"
-  shell: |
-    kubectl -n kube-system get deployment coredns -o yaml | sed 's/allowPrivilegeEscalation: false/allowPrivilegeEscalation: true/g' | kubectl apply -f -
-    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/{{ coreos_flannel_sha_commit }}/Documentation/kube-flannel.yml
-  when: "not cni_new | bool"
-
 - name: "Check if Calico is already deployed"
   shell: "kubectl -n kube-system get daemonset.apps calico-node -o jsonpath='{.metadata.name}'"
   register: kc_calico_ds
   ignore_errors: True
-  when: "cni_new | bool"
 
 - name: "Print calico output"
   debug:
     msg: "kc_calico_ds: {{ kc_calico_ds }}"
-  when: "(cni_new | bool) and kc_calico_ds is defined"
+  when: "kc_calico_ds is defined"
 
 - name: "Decide if Calico installation is needed"
   set_fact:
-    install_calico: "(cni_new | bool) and (kc_calico_ds is undefined or (kc_calico_ds.rc != 0 and 'NotFound' not in kc_calico_ds.stderr))"
+    install_calico: "{{ kc_calico_ds is undefined or (kc_calico_ds.rc != 0 and 'NotFound' not in kc_calico_ds.stderr) }}"
+
+- name: "Pull Calico images"
+  shell: |
+    docker pull docker.io/calico/cni:{{ calico_version }}
+    docker pull docker.io/calico/typha:{{ calico_version }}
+    docker pull docker.io/calico/pod2daemon-flexvol:{{ calico_version }}
+  register: kc_calico_pull
+  failed_when: kc_calico_pull.rc > 0 and 'already exists' not in kc_calico_pull.stderr
 
 - name: "Print Decide calico output"
   debug:
-    msg: "install_calico{{ item}}"
-  with_items:
-    - "install_calico: {{ install_calico }}"
-    - "install_calico: {{ install_calico }}"
-
-- name: "Generate Calico manifest file"
-  template:
-    src: "calico/calico.yaml.j2"
-    dest: "/tmp/calico.yaml"
-  become: yes
-  when: "install_calico"
-
-- name: "Apply Calico manifest"
-  shell: "kubectl apply -f /tmp/calico.yaml"
-  when: "install_calico"
+    msg: "install_calico {{ install_calico }}"
+
+# https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
+- name: "Install the Tigera Calico operator and custom resource definitions"
+  shell: "kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/{{ calico_version }}/manifests/tigera-operator.yaml"
+  register: kc_tigera_calico
+  failed_when: kc_tigera_calico.rc > 0 and 'already exists' not in kc_tigera_calico.stderr
+
+# Must use a custom custom-resources.yaml due to the CIDR address
+# - name: "Install Calico by creating the necessary custom resource"
+#   shell: "kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/{{ calico_version }}/manifests/custom-resources.yaml"
+#   register: kc_calico
+#   failed_when: kc_calico.rc > 0 and 'already exists' not in kc_calico.stderr
+- name: "Template Calico custom resource"
+  ansible.builtin.template:
+    src: calico/custom-resources.yaml.j2
+    dest: /tmp/custom-resources.yaml
+
+- name: "Install Calico by creating the necessary custom resource"
+  shell: |
+    kubectl create -f /tmp/custom-resources.yaml
+  register: kc_calico
+  failed_when: kc_calico.rc > 0 and 'already exists' not in kc_calico.stderr
+
 ...
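With the operator-based install above, a common failure mode is that the operator is created but calico-node never becomes ready. The task below is a hedged sketch of a follow-up check, not part of the role; it assumes the operator-managed `calico-system` namespace that the Tigera quickstart creates (the old manifest install used `kube-system` instead).

[source,yaml]
----
# Hedged sketch: wait for the operator-managed calico-node daemonset to roll out.
- name: "Wait for Calico to roll out"
  ansible.builtin.shell: "kubectl -n calico-system rollout status daemonset/calico-node --timeout=60s"
  register: calico_rollout
  retries: 10
  delay: 30
  until: calico_rollout.rc == 0
----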
diff --git a/ansible/roles/k8s_cluster/tasks/install_cni_l124.yml b/ansible/roles/k8s_cluster/tasks/install_cni_l124.yml new file mode 100644 index 00000000..9e2f8192 --- /dev/null +++ b/ansible/roles/k8s_cluster/tasks/install_cni_l124.yml @@ -0,0 +1,44 @@ +--- +- name: "Define CNI installation method" + set_fact: + cni_new: "{{ ansible_distribution == 'Fedora' or ((ansible_distribution == 'CentOS' or ansible_os_family == 'RedHat') and ansible_distribution_major_version != '7') }}" + +- name: "Install Flannel Virtual Network for pod communication" + shell: | + kubectl -n kube-system get deployment coredns -o yaml | sed 's/allowPrivilegeEscalation: false/allowPrivilegeEscalation: true/g' | kubectl apply -f - + kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/{{ coreos_flannel_sha_commit }}/Documentation/kube-flannel.yml + when: "not cni_new | bool" + +- name: "Check if Calico is already deployed" + shell: "kubectl -n kube-system get daemonset.apps calico-node -o jsonpath='{.metadata.name}'" + register: kc_calico_ds + ignore_errors: True + when: "cni_new | bool" + +- name: "Print calico output" + debug: + msg: "kc_calico_ds: {{ kc_calico_ds }}" + when: "(cni_new | bool) and kc_calico_ds is defined" + +- name: "Decide if Calico installation is needed" + set_fact: + install_calico: "(cni_new | bool) and (kc_calico_ds is undefined or (kc_calico_ds.rc != 0 and 'NotFound' not in kc_calico_ds.stderr))" + +- name: "Print Decide calico output" + debug: + msg: "install_calico{{ item}}" + with_items: + - "install_calico: {{ install_calico }}" + - "install_calico: {{ install_calico }}" + +- name: "Generate Calico manifest file" + template: + src: "calico/calico.yaml.j2" + dest: "/tmp/calico.yaml" + become: yes + when: "install_calico" + +- name: "Apply Calico manifest" + shell: "kubectl apply -f /tmp/calico.yaml" + when: "install_calico" +... diff --git a/ansible/roles/k8s_cluster/templates/calico/custom-resources.yaml.j2 b/ansible/roles/k8s_cluster/templates/calico/custom-resources.yaml.j2 new file mode 100644 index 00000000..32f457d2 --- /dev/null +++ b/ansible/roles/k8s_cluster/templates/calico/custom-resources.yaml.j2 @@ -0,0 +1,27 @@ +# Calico version 3.26.1 +# This section includes base Calico installation configuration. +# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation +apiVersion: operator.tigera.io/v1 +kind: Installation +metadata: + name: default +spec: + # Configures Calico networking. + calicoNetwork: + # Note: The ipPools section cannot be modified post-install. + ipPools: + - blockSize: 26 + cidr: {{ pod_subnet }} + encapsulation: VXLANCrossSubnet + natOutgoing: Enabled + nodeSelector: all() + +--- + +# This section configures the Calico API server. 
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+  name: default
+spec: {}
diff --git a/ansible/roles/k8s_cluster/templates/clusterrole-admin.j2 b/ansible/roles/k8s_cluster/templates/clusterrole-admin.j2
index 7de4ddbc..1477d4ec 100644
--- a/ansible/roles/k8s_cluster/templates/clusterrole-admin.j2
+++ b/ansible/roles/k8s_cluster/templates/clusterrole-admin.j2
@@ -1,4 +1,4 @@
-{% if k8s_version.startswith('1.23') %}
+{% if k8s_version is version('1.23.0', '>=') %}
 apiVersion: rbac.authorization.k8s.io/v1
 {% else %}
 apiVersion: rbac.authorization.k8s.io/v1beta1
diff --git a/ansible/roles/k8s_dashboard/tasks/install.yml b/ansible/roles/k8s_dashboard/tasks/install.yml
index 37353810..658cf014 100644
--- a/ansible/roles/k8s_dashboard/tasks/install.yml
+++ b/ansible/roles/k8s_dashboard/tasks/install.yml
@@ -27,14 +27,17 @@
 
 - name: "Collect the dashboard Token"
   shell: |
-    kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user \
-      -o jsonpath='{.secrets[0].name}') {% raw %}-o go-template='{{.data.token | base64decode}}'{% endraw %}
+    kubectl -n kubernetes-dashboard create token admin-user
+    # kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user \
+    #   -o jsonpath='{.secrets[0].name}') {% raw %}-o go-template='{{.data.token | base64decode}}'{% endraw %}
   register: k8s_dashboard_token_res
 
 - name: "Add the dashboard token to the passwordstore"
   set_fact:
     k8s_dashboard_token: "{{ k8s_dashboard_token_res.stdout }}"
 
+# TODO: This should be done at playbook level.
+# This role should only return variables and not perform this kind of operation.
 - name: "Add the dashboard token to the passwordstore"
   set_fact:
     k8s_dashboard_token_pass: "{{ query('passwordstore', pass_provider + '/' + ansible_hostname + '/k8s_dashboard_token overwrite=yes nosymbols=true create=True userpass=' + k8s_dashboard_token_res.stdout)[0] | lower }}"
diff --git a/ansible/roles/k8s_issuer_certificate/tasks/install.yml b/ansible/roles/k8s_issuer_certificate/tasks/install.yml
index ee293380..01ed1cc3 100644
--- a/ansible/roles/k8s_issuer_certificate/tasks/install.yml
+++ b/ansible/roles/k8s_issuer_certificate/tasks/install.yml
@@ -47,4 +47,5 @@
 
 - name: Deploy the Godaddy secrets, Issuers resources containing the API Token
   shell: |
-    {{ client_tool }} {{ k8s_config }} apply -f {{ tempPath }}
+    {{ client_tool }} apply -f {{ tempPath }}
+...
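Since the role now only applies the rendered manifests, a separate readiness check is useful once cert-manager picks them up. The sketch below is hedged and not part of the role: the namespace and Certificate name are placeholders standing in for whatever was templated into `{{ tempPath }}`, and `client_tool` is the variable the role already sets.

[source,yaml]
----
# Hedged sketch: poll cert-manager's Ready condition (placeholder names).
- name: "Wait for the certificate to become Ready"
  ansible.builtin.shell: >
    {{ client_tool }} -n snowdrop-site get certificate ca-cert
    -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
  register: cert_ready
  until: cert_ready.stdout == "True"
  retries: 20
  delay: 30
----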
diff --git a/ansible/roles/k8s_issuer_certificate/tasks/main.yml b/ansible/roles/k8s_issuer_certificate/tasks/main.yml
index 6e640b80..574fdacc 100644
--- a/ansible/roles/k8s_issuer_certificate/tasks/main.yml
+++ b/ansible/roles/k8s_issuer_certificate/tasks/main.yml
@@ -1,5 +1,4 @@
 - set_fact:
-    k8s_config: "--kubeconfig '/etc/kubernetes/admin.conf'"
     client_tool: kubectl
   when: not isOpenshift | bool
 
@@ -7,7 +6,7 @@
     var: k8s_config
 
 - include_tasks: install.yml
-  when: not remove | bool
+  when: state == 'present'
 
 - include_tasks: remove.yml
-  when: remove | bool
+  when: state == 'absent'
diff --git a/ansible/roles/k8s_issuer_certificate/tasks/remove.yml b/ansible/roles/k8s_issuer_certificate/tasks/remove.yml
index eab9f107..a2b32bba 100644
--- a/ansible/roles/k8s_issuer_certificate/tasks/remove.yml
+++ b/ansible/roles/k8s_issuer_certificate/tasks/remove.yml
@@ -1,6 +1,6 @@
 - name: Delete the Godaddy Secrets, Issuers & Certificates resources
   shell: |
-    {{ client_tool }} {{ k8s_config }} delete issuer,certificate,secret -l app=ca-cert -n {{ item }}
+    {{ client_tool }} delete issuer,certificate,secret -l app=ca-cert -n {{ item }}
   with_items:
     - snowdrop-site
     - generator-site
diff --git a/ansible/roles/k8s_issuer_certificate/templates/godaddy_token_secret.yml.j2 b/ansible/roles/k8s_issuer_certificate/templates/godaddy_token_secret.yml.j2
index 584c8e5d..5a73421f 100644
--- a/ansible/roles/k8s_issuer_certificate/templates/godaddy_token_secret.yml.j2
+++ b/ansible/roles/k8s_issuer_certificate/templates/godaddy_token_secret.yml.j2
@@ -7,4 +7,5 @@ metadata:
   namespace: {{ item.namespace }}
 type: Opaque
 stringData:
-  token: "{{ godaddy_token }}"
+#  token: "{ { godaddy_token } }"
+  token: "sso-key {{ api_key }}:{{ api_secret }}"
diff --git a/ansible/roles/passstore/ansible_inventory/tasks/main.yml b/ansible/roles/passstore/ansible_inventory/tasks/main.yml
index a8657eac..96f7af3f 100644
--- a/ansible/roles/passstore/ansible_inventory/tasks/main.yml
+++ b/ansible/roles/passstore/ansible_inventory/tasks/main.yml
@@ -1,5 +1,4 @@
 ---
-# Tasks for get_provider_host_keys
 - name: "Get passwordstore work folder from password_store_dir override variable"
   set_fact:
     my_passwordstore_folder: "{{ password_store_dir }}"
diff --git a/kubernetes/README.adoc b/kubernetes/README.adoc
new file mode 100644
index 00000000..109c2541
--- /dev/null
+++ b/kubernetes/README.adoc
@@ -0,0 +1,301 @@
+= Kubernetes
+Snowdrop Team (Antonio Costa)
+:icons: font
+:icon-set: fas
+:revdate: {docdate}
+:toc: left
+:toclevels: 3
+:description: This document describes the requirements and the process to install a k8s cluster on a host. The installation is done using Ansible.
+ifdef::env-github[]
+:tip-caption: :bulb:
+:note-caption: :information_source:
+:important-caption: :heavy_exclamation_mark:
+:caution-caption: :fire:
+:warning-caption: :warning:
+endif::[]
+
+== Introduction
+
+This document describes the requirements and the process to install a
+k8s cluster on a host. The installation is done using Ansible.
+
+=== Scope
+
+Describe the steps to execute to install k8s on a host.
+
+== Requirements
+
+First of all follow the instructions in the
+link:../ansible/playbook/README.md#installation-guide[Ansible
+Installation Guide section].
+
+=== Ansible Inventory
+
+In order to execute the installation of k8s several variables must be
+provided. To standardize the installation several Ansible Groups have
+been created for different installations.
+
+To populate these variables, some groups, with the corresponding group
+variables, have been created in the
+link:../ansible/inventory/hosts.yml[`hosts.yml`] inventory file.
+
+The following table shows the existing groups for k8s.
+
+[width="100%",cols="25%,25%m,50%",options="header",]
+|===
+|Group Type |Group Name |Description
+|Components |masters |Kubernetes control plane. Includes information
+such as firewall ports and services to be open as well as internal
+subnet information.
+
+|Components |nodes |Kubernetes node. Similar to masters but for k8s
+nodes.
+
+|Versions |k8s_116 |Information specific to version 1.16
+
+|Versions |k8s_115 |Information specific to version 1.15
+|===
+
+Installing Kubernetes requires a host to be assigned to two groups, one
+of each _Group Type_ from the previous table: a k8s component and a k8s
+version.
+
+More information on versions can be found in the link:../ansible/inventory/hosts.yml[`hosts.yml`] Ansible inventory file.
+
+.Click to see the k8s yaml file configuration
+[%collapsible]
+====
+[source,yaml]
+----
+include::../ansible/inventory/hosts.yml[tag=k8s_version]
+----
+====
+
+=== Host provisioning
+
+Provisioning a host is done using the appropriate Ansible Playbooks.
+
+The first step is to generate the inventory. More information in the local
+passwordstore link:../passwordstore/README.adoc[Define host inventory for provisioning] document.
+
+Once the inventory is prepared, the host can be created. To provision a RHOS VM check the link:../openstack/README.adoc[OpenStack] documentation. The latest configuration used is the following.
+
+[width="100%",cols="40%,60%m",options="header",]
+|===
+| Attribute | Value
+
+| Flavor | ci.m4.xlarge
+
+| Image | Fedora-Cloud-Base-37
+|===
+
+To remove the host from the inventory check the link:../passwordstore/README.adoc[Remove host from inventory] section of the passwordstore document.
+
+== Installation
+
+Once the host is defined in the inventory and also provisioned, execute the k8s creation playbook.
+
+Ansible tags are used to manage which components are to be installed. The tags that can be selected are the following.
+
+[width="100%",cols="25%m,10%c,65%",options="header",]
+|===
+| Tag | Always | Description
+
+| containerd | icon:times[] | Installs link:https://containerd.io/[containerd] as CRI
+
+| docker | icon:times[] | Installs Docker as CRI
+
+| ingress | icon:times[] | Installs link:https://kubernetes.io/docs/concepts/services-networking/ingress/[Ingress]
+
+| k8s_cluster | icon:check[] | Installs the Kubernetes cluster
+
+| k8s_dashboard | icon:times[] | Installs the link:https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/[Kubernetes Dashboard]
+|===
+
+.Deploy kubernetes on a host installing docker and the Dashboard
+[source,bash]
+----
+ansible-playbook ansible/playbook/kubernetes/k8s_install_passstore.yml -e vm_name=${VM_NAME} --tags docker,k8s_dashboard
+----
+
+[WARNING]
+====
+Be sure that a host group entry exists for the version you
+want to install within the `inventory/hosts` file
+
+[source,yaml]
+----
+    k8s_121:
+      vars:
+        k8s_version: 1.21.4
+        k8s_dashboard_version: v2.3.1
+----
+====
+
+Example for installing a k8s server from scratch using the Hetzner provider,
+where we will create a VM.
+
+[source,bash]
+----
+VM_NAME=snowdrop-vm \
+  && ansible-playbook hetzner/ansible/hetzner-delete-server.yml -e vm_name=${VM_NAME} -e hetzner_context_name=snowdrop \
+  && ansible-playbook ansible/playbook/passstore_controller_inventory_remove.yml -e vm_name=${VM_NAME} -e pass_provider=hetzner \
+  && ansible-playbook hetzner/ansible/hetzner-create-ssh-key.yml -e vm_name=${VM_NAME} \
+  && ansible-playbook ansible/playbook/passstore_controller_inventory.yml -e vm_name=${VM_NAME} -e pass_provider=hetzner -e k8s_type=masters -e k8s_version=115 -e operation=create \
+  && ansible-playbook hetzner/ansible/hetzner-create-server.yml -e vm_name=${VM_NAME} -e salt_text=$(gpg --gen-random --armor 1 20) -e hetzner_context_name=snowdrop \
+  && ansible-playbook ansible/playbook/sec_host.yml -e vm_name=${VM_NAME} -e provider=hetzner \
+  && ansible-playbook kubernetes/ansible/k8s.yml --limit ${VM_NAME}
+----
+
+[NOTE]
+====
+Both kubernetes playbooks (`k8s` and `k8s-misc`) can have their host overridden using the `override_host` variable, e.g.,
+`-e override_host=localhost` to launch it on the controller itself.
+====
+
+To uninstall a kubernetes cluster (kubeadm, kubelet, ...), execute this
+command.
+
+.Delete kubernetes cluster
+[source,bash]
+----
+ansible-playbook ansible/playbook/kubernetes/k8s_remove.yml -e vm_name=${VM_NAME}
+----
+
+== Other k8s tools
+
+To deploy other k8s tools.
+
+.Common parameters
+[cols="2,5"]
+|===
+| Parameter | Description
+
+| `vm_name`
+
+[.fuchsia]#string#
+
+[.red]#required#
+
+a| Name of the VM where the tools will be installed.
+
+|===
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/kubernetes/k8s_install_tools.yml -e vm_name=${VM_NAME} -e letsencrypt_env=prod --tags k8s_issuer_certificate
+----
+
+.k8s_issuer_certificate parameters
+[cols="2,5"]
+|===
+| Parameter | Description
+
+| `api_key`
+
+[.fuchsia]#string#
+
+[.red]#required#
+
+a| GoDaddy API key.
+
+| `api_secret`
+
+[.fuchsia]#string#
+
+[.red]#required#
+
+a| GoDaddy API secret key.
+
+| `letsencrypt_env`
+
+[.fuchsia]#string#
+
+a| Let's Encrypt environment to use.
+
+* *`staging` <= Default:* Staging environment
+* `prod`: Production environment
+
+|===
+
+== Troubleshooting
+
+=== Expired k8s certificate
+
+==== Problem
+
+* kubelet service shows connection errors.
+* The docker container running the k8s API server cannot be started
+
+==== Cause
+
+[source,bash]
+----
+$ docker logs xxxxxxxxxxxx
+...
+W0121 11:09:31.447982       1 clientconn.go:1251] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 0  }. Err :connection error: desc = "transport: authentication handshake failed: x509: certificate has expired or is not yet valid". Reconnecting...
+----
+
+Check the validity of the kubernetes certificates using the following
+command. If they have expired, apply the fix described in the
+link:#k8s-cert-sol[Solution] section.
+
+[source,bash]
+----
+$ openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text |grep ' Not '
+----
+
+[[k8s-cert-sol]]
+==== Solution
+
+The solution applied was
+https://stackoverflow.com/questions/56320930/renew-kubernetes-pki-after-expired/56334732#56334732[this
+answer from a Stack Overflow thread], applied to our k8s 1.14 cluster.
+
+Other references:
+
+* https://www.ibm.com/support/knowledgecenter/SSCKRH_1.1.0/platform/t_certificate_renewal.html
+
+[source,bash]
+----
+$ cd /etc/kubernetes/pki/
+$ mv {apiserver.crt,apiserver-etcd-client.key,apiserver-kubelet-client.crt,front-proxy-ca.crt,front-proxy-client.crt,front-proxy-client.key,front-proxy-ca.key,apiserver-kubelet-client.key,apiserver.key,apiserver-etcd-client.crt} ~/
+$ kubeadm init phase certs all --apiserver-advertise-address
+$ cd /etc/kubernetes/
+$ mv {admin.conf,controller-manager.conf,kubelet.conf,scheduler.conf} ~/
+$ kubeadm init phase kubeconfig all
+$ reboot
+----
+
+And then update the user's kube config.
+
+[source,bash]
+----
+$ cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+----
+
+=== Cannot log in using kubelet
+
+==== Problem
+
+[source,bash]
+----
+$ kubectl get pods
+error: You must be logged in to the server (Unauthorized)
+----
+
+This might happen for instance after renewing the certificates.
+
+==== Cause
+
+The `~/.kube/config` does not contain the client-certificate-data and
+client-key-data updated after renewing the certificate.
+
+[[solution]]
+==== Solution
+
+[source,bash]
+----
+$ cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+----
diff --git a/kubernetes/README.md b/kubernetes/README.md
deleted file mode 100644
index 8b3f04cc..00000000
--- a/kubernetes/README.md
+++ /dev/null
@@ -1,213 +0,0 @@
-# Table of Contents
-
-- [Table of Contents](#table-of-contents)
-- [Introduction](#introduction)
-  - [Scope](#scope)
-- [Requirements](#requirements)
-  - [Ansible Inventory](#ansible-inventory)
-  - [Host provisioning](#host-provisioning)
-  - [Host-Group Association](#host-group-association)
-- [Installation](#installation)
-- [Troublehsooting](#troublehsooting)
-  - [Expired k8s certificate](#expired-k8s-certificate)
-    - [Problem](#problem)
-    - [Cause](#cause)
-    - [Solution {#k8s-cert-sol}](#solution-k8s-cert-sol)
-  - [Cannot login using kubelet](#cannot-login-using-kubelet)
-    - [Problem](#problem-1)
-    - [Cause](#cause-1)
-    - [Solution](#solution)
-
-# Introduction
-
-This document describes the requirements, and the process to execute to install a k8s cluster on a host. The installation will be done using Ansible.
-
-## Scope
-
-Describe the steps to execute to install k8s on a host.
-
-# Requirements
-
-First of all follow the instructions in the [Ansible Installation Guide section](../ansible/playbook/README.md#installation-guide).
-
-## Ansible Inventory
-
-In order to execute the installation of k8s several variables must be provided. To standardize the installation several Ansible Groups have been created for different installations.
-
-To populate these variables, some groups, with the corresponding group variables, have been created in the [`hosts.yml`](../ansible/inventory/hosts.yml) inventory file.
-
-The following table shows the existing groups for k8s.
-
-| Group Type| Group Name | Description |
-| --- | --- | --- |
-| Components | masters | Kubernetes control plane. Includes information such as firewall ports and services to be open as well as internal subnet information. |
-| Components | nodes | Kubernetes node. Similar to masters but for k8s nodes. |
-| Versions | k8s_116 | Information v 1.16 specific |
-| Versions | k8s_115 | Information v 1.15 specific |
-
-Installing kubernetes requires a host to be assigned to 2 groups, identified from the previous table as *Group Type*, a k8s component and a k8s version.
-
-## Host provisioning
-
-Provisioning a host is done using the appropriate Ansible Playbooks.
- -First create the Ansible Inventory records as indicated in the [Create a host](../ansible/playbook/README.md#create-a-host) section of the ansible playbook documentation. - -In this example we create the inventory for a new vm to be provisioned in the hetzner provider. - -```bash -$ ansible-playbook ansible/playbook/passstore_controller_inventory.yml -e vm_name=my-host -e pass_provider=hetzner -e k8s_type=masters -e k8s_version=115 --tags create -``` - -In the pass database we can now see the following structure. - -``` -├── hetzner -| ├── my-host -│   │   ├── ansible_ssh_port -│   │   ├── groups -│   │   │   ├── k8s_115 -│   │   │   └── masters -│   │   ├── id_rsa -│   │   ├── id_rsa.pub -│   │   ├── os_password -│   │   ├── os_user -│   │   └── ssh_port -``` - -This host has already been added to the `masters` and `k8s_115` groups as parte of the script. - -To remove the host from the inventory... - -```bash -$ ansible-playbook ansible/playbook/passstore_controller_inventory_remove.yml -e vm_name=my-host -e pass_provider=hetzner -``` - -## Host-Group Association - -Once the host is in the inventory it can be associated with groups. - -For instance, to install k8s control plane for version 1.15 in a newly created host (`my-host` in this example) we have to to add that host to the `masters` and `k8s_115` groups. -To perform this operation use the `passstore_manage_host_groups.yml` playbook, as shown in the following example. - -Add a host to the `masters` group and to the `k8s_115` group. - -```bash -$ ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=add -e group_name=masters -e vm_name=my-host -$ ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=add -e group_name=k8s_115 -e vm_name=my-host -``` - -To remove a host from the `k8s_115` group... - -```bash -$ ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=remove -e group_name=k8s_115 -e vm_name=my-host -``` - -More information on how hosts are assigned to groups and actually adding and removing hosts from groups [here](../ansible/playbook/README.md#groups). - -# Installation - -Once the host is defined in the inventory and also provisioned, execute the k8s creation playbook. - -```bash -$ ansible-playbook kubernetes/ansible/k8s.yml --limit -``` - -The `limit` option tells ansible to only execute the playbook to the hosts limited in the statement. -Kubernetes version can be changed using the parameter `-e k8s_version=1.21.4` - -**WARNING**: Be sure that a host group entry exists for the version you want to install within the `inventory/hosts` file -```yaml - k8s_121: - vars: - k8s_version: 1.21.4 - k8s_dashboard_version: v2.3.1 -``` - -Example for installing a k8s server from scratch using hetzner provider where we will create a VM. 
- -```bash -$ VM_NAME=xXx \ - ; ansible-playbook hetzner/ansible/hetzner-delete-server.yml -e vm_name=${VM_NAME} -e hetzner_context_name=snowdrop \ - ; ansible-playbook ansible/playbook/passstore_controller_inventory_remove.yml -e vm_name=${VM_NAME} -e pass_provider=hetzner \ - && ansible-playbook hetzner/ansible/hetzner-create-ssh-key.yml -e vm_name=${VM_NAME} \ - && ansible-playbook ansible/playbook/passstore_controller_inventory.yml -e vm_name=${VM_NAME} -e pass_provider=hetzner -e k8s_type=masters -e k8s_version=115 -e operation=create \ - && ansible-playbook hetzner/ansible/hetzner-create-server.yml -e vm_name=${VM_NAME} -e salt_text=$(gpg --gen-random --armor 1 20) -e hetzner_context_name=snowdrop \ - && ansible-playbook ansible/playbook/sec_host.yml -e vm_name=${VM_NAME} -e provider=hetzner \ - && ansible-playbook kubernetes/ansible/k8s.yml --limit ${VM_NAME} -``` - -> NOTE: Both kubernetes playbooks (`k8s` and `k8s-misc`) can have its host overridden using the `override_host` variable, e.g., `-e override_host=localhost` to launch it on the controller itself. - -To delete the kubernetes cluster (kubeadmin, kubelet, ..), execute this comma,d -```bash -ansible-playbook kubernetes/ansible/k8s.yml --limit ${VM_NAME} -e remove=true -``` - -# Troubleshooting - -## Expired k8s certificate - -### Problem - -- kubelet service shows connection errors. -- The docker container running the k8s API server cannot be started - -### Cause - -```bash -$ docker logs xxxxxxxxxxxx -... -W0121 11:09:31.447982 1 clientconn.go:1251] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 0 }. Err :connection error: desc = "transport: authentication handshake failed: x509: certificate has expired or is not yet valid". Reconnecting... -``` - - Check the validity of the kubernetes certificate using the following command. If they have been expired, then apply the trick as defined at the [Solution](#solution-k8s-cert-sol) section - -```bash -$ openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text |grep ' Not ' -``` -### Solution {#k8s-cert-sol} - -The solution applied was the [this answer on stackoverflow thread](https://stackoverflow.com/questions/56320930/renew-kubernetes-pki-after-expired/56334732#56334732) applied to our k8s 1.14 cluster. - -Other references: -* https://www.ibm.com/support/knowledgecenter/SSCKRH_1.1.0/platform/t_certificate_renewal.html - -```bash -$ cd /etc/kubernetes/pki/ -$ mv {apiserver.crt,apiserver-etcd-client.key,apiserver-kubelet-client.crt,front-proxy-ca.crt,front-proxy-client.crt,front-proxy-client.key,front-proxy-ca.key,apiserver-kubelet-client.key,apiserver.key,apiserver-etcd-client.crt} ~/ -$ kubeadm init phase certs all --apiserver-advertise-address -$ cd /etc/kubernetes/ -$ mv {admin.conf,controller-manager.conf,kubelet.conf,scheduler.conf} ~/ -$ kubeadm init phase kubeconfig all -$ reboot -``` - -And then update the user's kube config. - -```bash -$ cp -i /etc/kubernetes/admin.conf $HOME/.kube/config -``` - -## Cannot log in using kubelet - -### Problem - -```bash -$ kubectl get pods -error: You must be logged in to the server (Unauthorized) -``` - -This might happen for instance after renewing the certificates. - -### Cause - -The `~/.kube/config` does not contain the client-certificate-data and client-key-data updated after renewing the certificate. 
-
-### Solution
-
-```bash
-$ cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
-```
-
-
diff --git a/openstack/README.adoc b/openstack/README.adoc
index de415081..39e3f1ce 100644
--- a/openstack/README.adoc
+++ b/openstack/README.adoc
@@ -55,7 +55,6 @@ Different OS images are available on Openstack.
 
 === Flavors
 
-
 .OpenStack Flavor information
 [%header,cols="2m,1,1,1,1,1"]
 |===
diff --git a/passwordstore/README.adoc b/passwordstore/README.adoc
new file mode 100644
index 00000000..5f9ff476
--- /dev/null
+++ b/passwordstore/README.adoc
@@ -0,0 +1,107 @@
+= Passwordstore Database Implementation
+Snowdrop Team (Antonio Costa)
+:icons: font
+:revdate: {docdate}
+:toc: left
+:description: This document describes the implementation of the passwordstore database in this project.
+ifdef::env-github[]
+:tip-caption: :bulb:
+:note-caption: :information_source:
+:important-caption: :heavy_exclamation_mark:
+:caution-caption: :fire:
+:warning-caption: :warning:
+endif::[]
+
+== Introduction
+
+This document describes the implementation of the passwordstore.
+
+== Prepare controller
+
+To prepare the controller for existing common keys use the `passstore_controller_init` playbook.
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/passstore/passstore_controller_init.yml -e pass_provider=openstack
+----
+
+== Ansible Inventory
+
+=== Define host inventory for provisioning
+
+Provisioning a host is done using the appropriate Ansible Playbooks.
+
+First create the Ansible Inventory records as indicated in the
+link:../ansible/playbook/README.md#create-a-host[Create a host] section
+of the ansible playbook documentation.
+
+In this example we create the inventory for a new vm to be provisioned
+in the openstack provider.
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/passstore/passstore_controller_inventory.yml \
+  -e vm_name=my-host \
+  -e pass_provider=openstack \
+  -e k8s_type=masters \
+  -e k8s_version=124 \
+  -e operation=create
+----
+
+In the pass database we can now see the following structure.
+
+[source]
+----
+├── openstack
+...
+│   ├── snowdrop-k8s
+│   │   ├── ansible_ssh_port
+│   │   ├── groups
+│   │   │   ├── k8s_124
+│   │   │   └── masters
+│   │   ├── os_password
+│   │   ├── os_user
+│   │   └── ssh_port
+----
+
+This host has already been added to the `masters` and `k8s_124` groups
+as part of the script.
+
+=== Remove host from inventory
+
+To remove a host from the passwordstore inventory use the following playbook.
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/passstore/passstore_controller_inventory_remove.yml -e vm_name=my-host -e pass_provider=openstack
+----
+
+== Host-Group Association
+
+Once the host is in the inventory it can be associated with groups.
+
+For instance, to install k8s control plane for version 1.15 in a newly
+created host (`my-host` in this example) we have to add that host to
+the `masters` and `k8s_115` groups. To perform this operation use the
+`passstore_manage_host_groups.yml` playbook, as shown in the following
+example.
+
+Add a host to the `masters` group and to the `k8s_115` group.
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=add -e group_name=masters -e vm_name=my-host
+ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=add -e group_name=k8s_115 -e vm_name=my-host
+----
+
+To remove a host from the `k8s_115` group.
+ +[source,bash] +---- +ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=remove -e group_name=k8s_115 -e vm_name=my-host +---- + +More information on how hosts are assigned to groups and actually adding +and removing hosts from groups +link:../ansible/playbook/README.md#groups[here]. From 588bc4e6b790084a7907c0fc0fd8ef70b1269ef5 Mon Sep 17 00:00:00 2001 From: Antonio Costa Date: Wed, 30 Aug 2023 12:21:05 +0200 Subject: [PATCH 2/5] feat: remove hetzner as provider --- .../passstore/passstore_controller_init.yml | 8 +++++--- .../passstore_controller_inventory.yml | 17 +++-------------- .../passstore_controller_inventory_remove.yml | 9 +++++++-- .../passstore/passstore_manage_host_groups.yml | 4 ++-- 4 files changed, 17 insertions(+), 21 deletions(-) diff --git a/ansible/playbook/passstore/passstore_controller_init.yml b/ansible/playbook/passstore/passstore_controller_init.yml index 7df0483f..120e162b 100644 --- a/ansible/playbook/passstore/passstore_controller_init.yml +++ b/ansible/playbook/passstore/passstore_controller_init.yml @@ -5,7 +5,7 @@ # . Import snowdrop generic key # # Required variables: -# . pass_provider: provider in the passstore project [hetzner,openstack] +# . pass_provider: provider in the passstore project [openstack] - name: "Init controller" hosts: localhost gather_facts: no @@ -15,10 +15,12 @@ assert: that: - "pass_provider is defined" - - "pass_provider == 'hetzner' or pass_provider == 'openstack'" + - "pass_provider == 'openstack'" fail_msg: - "Required parameters:" - - " pass_provider: provider in the passstore project [hetzner,openstack]" + - " pass_provider:" + - " - must be defined" + - " - provider in the passstore project [openstack]" - name: "Pull pass git database" shell: "git pull" diff --git a/ansible/playbook/passstore/passstore_controller_inventory.yml b/ansible/playbook/passstore/passstore_controller_inventory.yml index 4e6d3b65..34d9d95f 100644 --- a/ansible/playbook/passstore/passstore_controller_inventory.yml +++ b/ansible/playbook/passstore/passstore_controller_inventory.yml @@ -1,7 +1,7 @@ --- # Required variables: # . vm_name: Name of the vm -# . pass_provider: provider in the passstore project [hetzner] +# . 
pass_provider: provider in the passstore project [openstack] - name: "Generate inventory files on the controller" hosts: localhost gather_facts: no @@ -12,11 +12,11 @@ that: - "vm_name is defined" - "pass_provider is defined" - - "pass_provider == 'hetzner' or pass_provider == 'openstack'" + - "pass_provider == 'openstack'" fail_msg: - "Required parameters:" - " vm_name: Name of the vm" - - " pass_provider: provider in the passstore project [hetzner,openstack]" + - " pass_provider: provider in the passstore project [openstack]" - name: "Pull pass git database" shell: "git pull" @@ -40,17 +40,6 @@ pass_l1: "{{ pass_db_name | default('snowdrop') }}" pass_l2: "{{ pass_provider | default('openstack') }}" pass_l3: "{{ vm_name }}" - # operation: "create" - # when: "operation is defined and operation == 'create' " - - # - name: "Build inventory" - # include_role: - # name: "passstore/ansible_inventory" - # vars: - # pass_l1: "{{ pass_db_name | default('snowdrop') }}" - # pass_l2: "{{ pass_provider | default('hetzner') }}" - # pass_l3: "{{ vm_name }}" - # when: "operation is undefined or operation != 'create'" - name: "Add to k8s version" include_role: diff --git a/ansible/playbook/passstore/passstore_controller_inventory_remove.yml b/ansible/playbook/passstore/passstore_controller_inventory_remove.yml index 6c91f149..4fad36a6 100644 --- a/ansible/playbook/passstore/passstore_controller_inventory_remove.yml +++ b/ansible/playbook/passstore/passstore_controller_inventory_remove.yml @@ -2,7 +2,7 @@ # file: passstore_controller_inventory_remove.yml # Required variables: # . vm_name: Name of the vm -# . pass_provider: provider in the passstore project [hetzner] +# . pass_provider: provider in the passstore project [openstack] - name: Delete local inventory configuration hosts: localhost gather_facts: no @@ -15,7 +15,12 @@ - "vm_name | length > 0" - "pass_provider is defined" - "pass_provider | length > 0" - fail_msg: "'vm_name' and 'pass_provider' must be defined" + fail_msg: + - "Required parameters:" + - " vm_name must be defined" + - " pass_provider:" + - " - must be defined" + - " - provider in the passstore project [openstack]" - name: "Pull pass git database" shell: "git pull" diff --git a/ansible/playbook/passstore/passstore_manage_host_groups.yml b/ansible/playbook/passstore/passstore_manage_host_groups.yml index 401d8ac6..75729aa7 100644 --- a/ansible/playbook/passstore/passstore_manage_host_groups.yml +++ b/ansible/playbook/passstore/passstore_manage_host_groups.yml @@ -38,7 +38,7 @@ - name: Check vm_pass_provider value fail: - msg: "Missing or invalid vm_pass_provider {{ vm_pass_provider }}. Should be in [hetzner]" + msg: "Missing or invalid vm_pass_provider {{ vm_pass_provider }}. Should be in [openstack]" when: "vm_pass_provider is not defined or vm_pass_provider | length == 0" - name: Check operation value @@ -82,4 +82,4 @@ shell: "git push" args: chdir: "{{ lookup('env', 'PASSWORD_STORE_DIR') }}" -... \ No newline at end of file +... 
From 7d9e2f9b6ac443f100f3e85f969ffdcbfbb41931 Mon Sep 17 00:00:00 2001
From: Antonio Costa
Date: Wed, 30 Aug 2023 13:21:24 +0200
Subject: [PATCH 3/5] docs: improved passwordstore documentation

---
 ansible/playbook/passstore/README.adoc | 113 +++++++++++++++++++++++
 passwordstore/README.adoc              | 118 ++++++++++---------------
 2 files changed, 160 insertions(+), 71 deletions(-)
 create mode 100644 ansible/playbook/passstore/README.adoc

diff --git a/ansible/playbook/passstore/README.adoc b/ansible/playbook/passstore/README.adoc
new file mode 100644
index 00000000..b595eb2e
--- /dev/null
+++ b/ansible/playbook/passstore/README.adoc
@@ -0,0 +1,113 @@
+= Passwordstore Ansible Playbooks
+Snowdrop Team (Antonio Costa)
+:icons: font
+:revdate: {docdate}
+:toc: left
+:description: This document describes the passstore specific playbooks.
+ifdef::env-github[]
+:tip-caption: :bulb:
+:note-caption: :information_source:
+:important-caption: :heavy_exclamation_mark:
+:caution-caption: :fire:
+:warning-caption: :warning:
+endif::[]
+
+== Prepare controller
+
+To prepare the controller for existing common keys use the `passstore_controller_init` playbook.
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/passstore/passstore_controller_init.yml -e pass_provider=openstack
+----
+
+== Ansible Inventory
+
+=== Define host inventory for provisioning
+
+Provisioning a host is done using the appropriate Ansible Playbooks.
+
+First create the Ansible Inventory records as indicated in the
+link:../ansible/playbook/README.md#create-a-host[Create a host] section
+of the ansible playbook documentation.
+
+In this example we create the inventory for a new vm to be provisioned
+in the openstack provider.
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/passstore/passstore_controller_inventory.yml \
+  -e vm_name=my-host \
+  -e pass_provider=openstack \
+  -e k8s_type=masters \
+  -e k8s_version=124 \
+  -e operation=create
+----
+
+In the pass database we can now see the following structure.
+
+[source]
+----
+├── openstack
+...
+│   ├── snowdrop-k8s
+│   │   ├── ansible_ssh_port
+│   │   ├── groups
+│   │   │   ├── k8s_124
+│   │   │   └── masters
+│   │   ├── os_password
+│   │   ├── os_user
+│   │   └── ssh_port
+----
+
+This host has already been added to the `masters` and `k8s_124` groups
+as part of the script.
+
+=== Remove host from inventory
+
+To remove a host from the passwordstore inventory use the following playbook.
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/passstore/passstore_controller_inventory_remove.yml \
+  -e vm_name=my-host \
+  -e pass_provider=openstack
+----
+
+== Host-Group Association
+
+Once the host is in the inventory it can be associated with groups.
+
+For instance, to install k8s control plane for version 1.15 in a newly
+created host (`my-host` in this example) we have to add that host to
+the `masters` and `k8s_115` groups. To perform this operation use the
+`passstore_manage_host_groups.yml` playbook, as shown in the following
+example.
+
+Add a host to the `masters` group and to the `k8s_124` group.
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/passstore/passstore_manage_host_groups.yml \
+  -e operation=add \
+  -e group_name=masters \
+  -e vm_name=my-host
+ansible-playbook ansible/playbook/passstore/passstore_manage_host_groups.yml \
+  -e operation=add \
+  -e group_name=k8s_124 \
+  -e vm_name=my-host
+----
+
+To remove a host from the `k8s_124` group.
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/passstore/passstore_manage_host_groups.yml \
+  -e operation=remove \
+  -e group_name=k8s_124 \
+  -e vm_name=my-host
+----
+
+More information on how hosts are assigned to groups, and on adding and
+removing hosts from groups, is available
+link:../ansible/playbook/README.md#groups[here].
diff --git a/passwordstore/README.adoc b/passwordstore/README.adoc
index 5f9ff476..fbcf54ae 100644
--- a/passwordstore/README.adoc
+++ b/passwordstore/README.adoc
@@ -3,7 +3,7 @@ Snowdrop Team (Antonio Costa)
 :icons: font
 :revdate: {docdate}
 :toc: left
-:description: This document describes the implementation of the passwordstore database in this project.
+:description: This document describes the passwordstore implementation.
 ifdef::env-github[]
 :tip-caption: :bulb:
@@ -15,93 +15,69 @@ endif::[]
 
 == Introduction
 
-This document describes the implementation of the passwordstore.
+This document describes the implementation of the passwordstore in this project.
 
-== Prepare controller
+== Overview
 
-To prepare the controller for existing common keys use the `passstore_controller_init` playbook.
+The pass implementation uses a link:https://www.passwordstore.org/[pass]
+database to store the Ansible inventory and other infrastructure-related
+information.
 
-[source,bash]
-----
-ansible-playbook ansible/playbook/passstore/passstore_controller_init.yml -e pass_provider=openstack
-----
+=== Ansible pass Structure
 
-== Ansible Inventory
+Besides storing other information, the pass database also stores the team
+Ansible Inventory.
 
-=== Define host inventory for provisioning
+The structure of the inventory is the following:
 
-Provisioning a host is done using the appropriate Ansible Playbooks.
+[width="100%",cols="20%,20%,10%,50%",options="header",]
+|===
+| Level | Type | Data | Comments
 
-First create the Ansible Inventory records as indicated in the
-link:../ansible/playbook/README.md#create-a-host[Create a host] section
-of the ansible playbook documentation.
+| Level 1
+| Directory
+| Provider
+a| The name of the provider is used to split VMs for different providers.
 
-In this example we create the inventory for a new vm to be provisioned
-in the openstack provider.
+Currently only `openstack` is a tested provider.
 
-[source,bash]
-----
-ansible-playbook ansible/playbook/passstore/passstore_controller_inventory.yml \
-  -e vm_name=my-host \
-  -e pass_provider=openstack \
-  -e k8s_type=masters \
-  -e k8s_version=124 \
-  -e operation=create
-----
+[WARNING]
+====
+Although the `hetzner` provider is also implemented, it has been abandoned and will be removed from the pass database.
+====
 
-In the pass database we can now see the following structure.
+| Level 2
+| Directory
+| VM
+| Name that will be given to the VM. It will also be used as the
+Ansible Host name.
 
-[source]
-----
-├── openstack
-...
-│   ├── snowdrop-k8s
-│   │   ├── ansible_ssh_port
-│   │   ├── groups
-│   │   │   ├── k8s_124
-│   │   │   └── masters
-│   │   ├── os_password
-│   │   ├── os_user
-│   │   └── ssh_port
-----
+| Level 3
+| Entry
+| Ansible attributes
+| One entry for each Ansible attribute that will be translated into an
+environment variable.
 
-This host has already been added to the `masters` and `k8s_124` groups
-as part of the script.
+| Level 3
+| Directory
+| `groups`
+| List of groups with which the host will be associated.
 
-=== Remove host from inventory
+| Level 4 (under `groups`)
+| Entries
+| `group name`
+| One entry for each group the host will be associated with.
-
+The entry name is the group name.
+
+|===
+
-To remove a host from the passwordstore inventory use the following playbook.
-
-[source,bash]
-----
-ansible-playbook ansible/playbook/passstore/passstore_controller_inventory_remove.yml -e vm_name=my-host -e pass_provider=openstack
-----
-
-== Host-Group Association
+=== Ansible Inventory
 
-Once the host is in the inventory it can be associated with groups.
+The Ansible inventory obtained from the pass database is built using the Python
+script located at link:../ansible/inventory/pass_inventory.py[../ansible/inventory/pass_inventory.py].
 
-For instance, to install k8s control plane for version 1.15 in a newly
-created host (`my-host` in this example) we have to add that host to
-the `masters` and `k8s_115` groups. To perform this operation use the
-`passstore_manage_host_groups.yml` playbook, as shown in the following
-example.
+== Ansible Playbooks
 
-Add a host to the `masters` group and to the `k8s_115` group.
+Information on the available playbooks can be found link:../ansible/playbook/passstore/README.adoc[here].
 
-[source,bash]
-----
-ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=add -e group_name=masters -e vm_name=my-host
-ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=add -e group_name=k8s_115 -e vm_name=my-host
-----
-
-To remove a host from the `k8s_115` group.
-
-[source,bash]
-----
-ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=remove -e group_name=k8s_115 -e vm_name=my-host
-----
-
-More information on how hosts are assigned to groups and actually adding
-and removing hosts from groups
-link:../ansible/playbook/README.md#groups[here].
From 0aebb710fe5e1d234655820845324d218f18a7d2 Mon Sep 17 00:00:00 2001
From: Antonio Costa
Date: Wed, 30 Aug 2023 15:05:43 +0200
Subject: [PATCH 4/5] docs: improved kubernetes docs

---
 ansible/README.adoc                           |   1 +
 ansible/playbook/kubernetes/README.adoc       | 129 ++++++++++++++++-
 .../playbook/kubernetes/k8s_install_tools.yml |   2 +-
 kubernetes/README.adoc                        | 133 +-----------------
 4 files changed, 136 insertions(+), 129 deletions(-)

diff --git a/ansible/README.adoc b/ansible/README.adoc
index 4698ffd7..e3117874 100644
--- a/ansible/README.adoc
+++ b/ansible/README.adoc
@@ -145,6 +145,7 @@ Finally install the collection.
 ansible-galaxy collection install $(find . -name snowdrop-cloud_infra-*.tar.gz) --upgrade
 ----
 
+[#user-guide]
 == User Guide
 
 Provisioning and accessing a server requires several steps, each of which will be covered in this section.
diff --git a/ansible/playbook/kubernetes/README.adoc b/ansible/playbook/kubernetes/README.adoc
index 78ca9ae6..c1b0ce75 100644
--- a/ansible/playbook/kubernetes/README.adoc
+++ b/ansible/playbook/kubernetes/README.adoc
@@ -1,5 +1,132 @@
 = Kubernetes Ansible Playbooks
+Snowdrop Team (Antonio Costa)
+:icons: font
+:revdate: {docdate}
 :toc: left
-:description: This document describes Kubernetes specific playbooks.
+:description: This document describes the Kubernetes-specific playbooks.
+ifdef::env-github[]
+:tip-caption: :bulb:
+:note-caption: :information_source:
+:important-caption: :heavy_exclamation_mark:
+:caution-caption: :fire:
+:warning-caption: :warning:
+endif::[]
+
+== Requirements
+
+=== Environment variables
+
+.Environment variables
+[cols="2,5"]
+|===
+| Environment Variable | Description
+
+| `VM_NAME`
+
+[.fuchsia]#string#
+
+[.red]#required#
+
+a| Name of the VM where the tools will be installed. It is referenced as `${VM_NAME}` throughout this document.
+
+|===
+
+=== Ansible Inventory
+
+[WARNING]
+====
+Be sure that a host group entry exists for the version you
+want to install within the `inventory/hosts` file.
+
+[source,yaml]
+----
+  k8s_121:
+    vars:
+      k8s_version: 1.21.4
+      k8s_dashboard_version: v2.3.1
+----
+====
+
+== Playbooks
+
+=== Install a Kubernetes cluster using Passwordstore
+
+Installs a new kubernetes cluster on an existing host using the team
+passwordstore as the Ansible Inventory source.
+
+To select which components are to be installed, use the following Ansible tags.
+
+[width="100%",cols="25%m,10%c,65%",options="header",]
+|===
+| Tag | Always | Description
+
+| containerd | icon:times[] | Installs link:https://containerd.io/[containerd] as CRI
+
+| docker | icon:times[] | Installs Docker as CRI
+
+| ingress | icon:times[] | Installs link:https://kubernetes.io/docs/concepts/services-networking/ingress/[Ingress]
+
+| k8s_cluster | icon:check[] | Installs the Kubernetes cluster
+
+| k8s_dashboard | icon:times[] | Installs the link:https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/[Kubernetes Dashboard]
+|===
+
+.Deploy kubernetes on a host installing docker and the Dashboard
+[source,bash]
+----
+ansible-playbook ansible/playbook/kubernetes/k8s_install_passstore.yml -e vm_name=${VM_NAME} --tags docker,k8s_dashboard
+----
+
+=== Uninstall a Kubernetes cluster
+
+To uninstall a kubernetes cluster (kubeadm, kubelet, ...), execute this
+playbook.
+
+.Delete kubernetes cluster
+[source,bash]
+----
+ansible-playbook ansible/playbook/kubernetes/k8s_remove.yml -e vm_name=${VM_NAME}
+----
+
+== Other k8s tools
+
+Several other k8s tools can be installed using the generic tools installation playbook, as shown in the following sections.
+
+=== k8s_issuer_certificate
+
+[source,bash]
+----
+ansible-playbook ansible/playbook/kubernetes/k8s_install_tools.yml -e vm_name=${VM_NAME} -e letsencrypt_env=prod --tags k8s_issuer_certificate
+----
+
+.k8s_issuer_certificate parameters
+[cols="2,5"]
+|===
+| Parameter | Description
+
+| `api_key`
+
+[.fuchsia]#string#
+
+[.red]#required#
+
+a| GoDaddy API key.
+
+| `api_secret`
+
+[.fuchsia]#string#
+
+[.red]#required#
+
+a| GoDaddy API secret key.
+
+| `letsencrypt_env`
+
+[.fuchsia]#string#
+
+a| Let's Encrypt environment to use.
+
+* `staging` (default): Staging environment
+* `prod`: Production environment
+
+|===
diff --git a/ansible/playbook/kubernetes/k8s_install_tools.yml b/ansible/playbook/kubernetes/k8s_install_tools.yml
index b443ca48..c5b7958e 100644
--- a/ansible/playbook/kubernetes/k8s_install_tools.yml
+++ b/ansible/playbook/kubernetes/k8s_install_tools.yml
@@ -26,7 +26,7 @@
         - "api_key is defined and api_secret is defined"
       fail_msg:
         - "Required parameters:"
-        - "  vm_name: VM to where the tolls will be deployed"
+        - "  vm_name: VM to where the tools will be deployed"
         - "  state: 'present' to install the tools and 'absent' to remove them"
       tags: [always]
 
diff --git a/kubernetes/README.adoc b/kubernetes/README.adoc
index 109c2541..30a74470 100644
--- a/kubernetes/README.adoc
+++ b/kubernetes/README.adoc
@@ -32,6 +32,8 @@ Installation Guide section].
 
 === Ansible Inventory
 
+For information related to the Ansible Inventory check the link:../ansible/README.adoc#user-guide[User guide].
+
 In order to execute the installation of k8s several variables must be
 provided. To standardize the installation several Ansible Groups have
 been created for different installations.
@@ -74,7 +76,7 @@ include::../ansible/inventory/hosts.yml[tag=k8s_version]
 
 === Host provisioning
 
-Provisioning a host is done using the appropriate Ansible Playbooks.
+Provisioning a host is done using the appropriate Ansible Playbooks, depending on the provider, and is out of the scope of this document.
 
 The first step is to generate the inventory. More information in the local passwordstore link:../passwordstore/README.adoc[Define host inventory for provisioning] document.
 
@@ -92,133 +94,10 @@ Once the inventory is prepared the host can be created. To provision a RHOS VM c
 
 To remove the host from the inventory check the link:../passwordstore/README.adoc[Remove host from inventory] section from the passwordstore document.
 
-== Installation
-
-Once the host is defined in the inventory and also provisioned, execute the k8s creation playbook.
-
-Ansible tags are used to manage which components are to be installed. The tags that can be selected are the following.
-
-[width="100%",cols="25%m,10%c,65%",options="header",]
-|===
-| Tag | Always | Description
-
-| containerd | icon:times[] | Installs link:https://containerd.io/[containerd] as CRI
-
-| docker | icon:times[] | Installs Docker as CRI
-
-| ingress | icon:times[] | Installs link:https://kubernetes.io/docs/concepts/services-networking/ingress/[Ingress]
-
-| k8s_cluster | icon:check[] | Installs the Kubernetes cluster
-
-| k8s_dashboard | icon:times[] | Installs the link:https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/[Kubernetes Dashboard]
-|===
-
-.Deploy kubernetes on a host installing docker and the Dashboard
-[source,bash]
-----
-ansible-playbook ansible/playbook/kubernetes/k8s_install_passstore.yml -e vm_name=${VM_NAME} --tags docker,k8s_dashboard
-----
-
-[WARNING]
-====
-Be sure that a host group entry exists for the version you
-want to install within the `inventory/hosts` file
+== Available Ansible Playbooks
 
-[source,yaml]
-----
-  k8s_121:
-    vars:
-      k8s_version: 1.21.4
-      k8s_dashboard_version: v2.3.1
-----
-====
-
-Example for installing a k8s server from scratch using openstack provider
-where we will create a VM.
-
-[source,bash]
-----
-VM_NAME=snowdrop-vm \
-  && ansible-playbook hetzner/ansible/hetzner-delete-server.yml -e vm_name=${VM_NAME} -e hetzner_context_name=snowdrop \
-  && ansible-playbook ansible/playbook/passstore_controller_inventory_remove.yml -e vm_name=${VM_NAME} -e pass_provider=hetzner \
-  && ansible-playbook hetzner/ansible/hetzner-create-ssh-key.yml -e vm_name=${VM_NAME} \
-  && ansible-playbook ansible/playbook/passstore_controller_inventory.yml -e vm_name=${VM_NAME} -e pass_provider=hetzner -e k8s_type=masters -e k8s_version=115 -e operation=create \
-  && ansible-playbook hetzner/ansible/hetzner-create-server.yml -e vm_name=${VM_NAME} -e salt_text=$(gpg --gen-random --armor 1 20) -e hetzner_context_name=snowdrop \
-  && ansible-playbook ansible/playbook/sec_host.yml -e vm_name=${VM_NAME} -e provider=hetzner \
-  && ansible-playbook kubernetes/ansible/k8s.yml --limit ${VM_NAME}
-----
-
-[NOTE]
-====
-Both kubernetes playbooks (`k8s` and `k8s-misc`) can have their host overridden using the `override_host` variable, e.g.,
-`-e override_host=localhost` to launch them on the controller itself.
-====
-
-To uninstall a kubernetes cluster (kubeadm, kubelet, ...), execute this
-command.
-
-.Delete kubernetes cluster
-[source,bash]
-----
-ansible-playbook ansible/playbook/kubernetes/k8s_remove.yml -e vm_name=${VM_NAME}
-----
-
-== Other k8s tools
-
-This section describes how to deploy other k8s tools.
-
-.Common parameters
-[cols="2,5"]
-|===
-| Parameter | Description
-
-| `vm_name`
-
-[.fuchsia]#string#
-
-[.red]#required#
-
-a| Name of the VM where the tools will be installed.
-
-|===
-
-[source,bash]
-----
-ansible-playbook ansible/playbook/kubernetes/k8s_install_tools.yml -e vm_name=${VM_NAME} -e letsencrypt_env=prod --tags k8s_issuer_certificate
-----
-
-.k8s_issuer_certificate parameters
-[cols="2,5"]
-|===
-| Parameter | Description
-
-| `api_key`
-
-[.fuchsia]#string#
-
-[.red]#required#
-
-a| GoDaddy API key.
-
-| `api_secret`
-
-[.fuchsia]#string#
-
-[.red]#required#
-
-a| GoDaddy API secret key.
-
-| `letsencrypt_env`
-
-[.fuchsia]#string#
-
-a| Let's Encrypt environment to use.
-
-* `staging` (default): Staging environment
-* `prod`: Production environment
-
-|===
+More information on the available Kubernetes Ansible Playbooks can be found in the
+link:../ansible/playbook/kubernetes/README.adoc[Playbook README].
 
 == Troubleshooting
From 0936f048f5b5d1b37d7b4e8c14f05968e8607c93 Mon Sep 17 00:00:00 2001
From: Antonio Costa
Date: Thu, 7 Sep 2023 09:05:31 +0200
Subject: [PATCH 5/5] replaced references to k8s 1.15 with 1.24

---
 ansible/playbook/passstore/README.adoc | 4 ++--
 hetzner/scripts/vm-k8s.sh              | 2 +-
 kubernetes/README.adoc                 | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/ansible/playbook/passstore/README.adoc b/ansible/playbook/passstore/README.adoc
index b595eb2e..9d83a277 100644
--- a/ansible/playbook/passstore/README.adoc
+++ b/ansible/playbook/passstore/README.adoc
@@ -78,9 +78,9 @@ ansible-playbook ansible/playbook/passstore/passstore_controller_inventory_remov
 
 Once the host is in the inventory it can be associated with groups.
 
-For instance, to install k8s control plane for version 1.15 in a newly
+For instance, to install k8s control plane for version 1.24 in a newly
 created host (`my-host` in this example) we have to add that host to
-the `masters` and `k8s_115` groups. To perform this operation use the
+the `masters` and `k8s_124` groups. To perform this operation use the
 `passstore_manage_host_groups.yml` playbook, as shown in the following
 example.
 
diff --git a/hetzner/scripts/vm-k8s.sh b/hetzner/scripts/vm-k8s.sh
index 77ea8f1c..88d895a9 100755
--- a/hetzner/scripts/vm-k8s.sh
+++ b/hetzner/scripts/vm-k8s.sh
@@ -36,7 +36,7 @@ ansible-playbook playbook/generate_inventory.yml \
 ansible-playbook -i inventory/${IP_HETZNER}_host \
   playbook/k8s.yml \
   --tags k8s_cluster \
-  -e k8s_version=1.15.9 \
+  -e k8s_version=1.24.3 \
   -e ip_address=${IP_HETZNER}
 
 ansible-playbook -i inventory/${IP_HETZNER}_host \
diff --git a/kubernetes/README.adoc b/kubernetes/README.adoc
index 30a74470..16a8b534 100644
--- a/kubernetes/README.adoc
+++ b/kubernetes/README.adoc
@@ -54,9 +54,9 @@ subnet information.
 
 |Components |nodes |Kubernetes node. Similar to masters but for k8s nodes.
 
-|Versions |k8s_116 |Information v 1.16 specific
+|Versions |k8s_124 |Information specific to v 1.24
 
-|Versions |k8s_115 |Information v 1.15 specific
+|Versions |k8s_123 |Information specific to v 1.23
 |===
 
 Installing kubernetes requires a host to be assigned to 2 groups,