From cadebed6d841260b4f0e58611c9129ab67ff8d83 Mon Sep 17 00:00:00 2001 From: Glenn Marcy Date: Wed, 22 Dec 2021 14:40:22 -0500 Subject: [PATCH 1/5] refactor: cleanup several areas before gitlab-ci changes This commit includes the following changes: - Change parameter vars passed to the create_managed_service and gather_host_facts tasks to begin with _param_ prefix. - Distribute the juju client key to all cluster nodes for cdk deployments. - Separate kubeinit cluster facts from the facts used for controlling the kubeinit playbook flow. - Check for existing firewalld rules so that we only make changes when necessary. If we do reload firewalld then also reload podman networks to rebuild iptable rules. - Store public key for each host in hostvars. - Update setup tasks to follow these same conventions. --- kubeinit/playbook.yml | 4 +- kubeinit/roles/kubeinit_apache/tasks/main.yml | 8 ++- kubeinit/roles/kubeinit_bind/tasks/main.yml | 8 ++- kubeinit/roles/kubeinit_cdk/tasks/main.yml | 10 +++ .../roles/kubeinit_dnsmasq/tasks/main.yml | 8 ++- .../roles/kubeinit_haproxy/tasks/main.yml | 8 ++- .../kubeinit_libvirt/tasks/create_network.yml | 65 +++++++++++++++---- .../tasks/deploy_centos_guest.yml | 2 + .../tasks/deploy_debian_guest.yml | 2 + .../tasks/deploy_ubuntu_guest.yml | 2 + .../tasks/download_cloud_images.yml | 16 ++--- kubeinit/roles/kubeinit_nexus/tasks/main.yml | 8 ++- .../tasks/cleanup_hypervisors.yml | 16 ++--- .../kubeinit_prepare/tasks/deploy_cluster.yml | 16 ++--- .../tasks/gather_host_facts.yml | 27 ++++---- .../tasks/gather_kubeinit_facts.yml | 14 ++-- .../roles/kubeinit_prepare/tasks/main.yml | 35 ++++++---- .../tasks/post_deployment.yml | 16 ++--- .../tasks/prepare_cluster.yml | 16 ++--- .../kubeinit_prepare/tasks/prepare_groups.yml | 4 +- .../tasks/prepare_hypervisors.yml | 4 +- .../roles/kubeinit_registry/tasks/main.yml | 8 ++- .../tasks/00_create_service_pod.yml | 33 ++++++++-- .../tasks/create_managed_service.yml | 30 ++++----- .../tasks/create_provision_container.yml | 17 +++-- .../roles/kubeinit_services/tasks/main.yml | 16 ++--- .../tasks/prepare_services.yml | 2 +- .../roles/kubeinit_validations/tasks/main.yml | 16 ++--- setup/inventory | 4 +- .../tasks/cleanup_deployment.yml | 16 ++--- .../tasks/gather_host_facts.yml | 18 ++--- .../tasks/gather_setup_facts.yml | 19 ++---- setup/roles/kubeinit_setup/tasks/main.yml | 16 ++--- .../tasks/prepare_environment.yml | 35 +++++----- 34 files changed, 315 insertions(+), 204 deletions(-) diff --git a/kubeinit/playbook.yml b/kubeinit/playbook.yml index 5c8de4518..2b5779725 100644 --- a/kubeinit/playbook.yml +++ b/kubeinit/playbook.yml @@ -64,8 +64,8 @@ tasks_from: prepare_hypervisor.yml public: true vars: - kubeinit_cluster_name: "{{ hostvars['kubeinit-facts'].cluster_name }}" - when: inventory_hostname in hostvars['kubeinit-facts'].hypervisors + kubeinit_cluster_name: "{{ hostvars['kubeinit-cluster-facts'].cluster_name }}" + when: inventory_hostname in hostvars['kubeinit-cluster-facts'].hypervisors - name: Run cluster deployment on prepared hypervisors hosts: localhost diff --git a/kubeinit/roles/kubeinit_apache/tasks/main.yml b/kubeinit/roles/kubeinit_apache/tasks/main.yml index 3aaf4acde..960caf5b9 100644 --- a/kubeinit/roles/kubeinit_apache/tasks/main.yml +++ b/kubeinit/roles/kubeinit_apache/tasks/main.yml @@ -70,9 +70,11 @@ tasks_from: create_managed_service.yml public: true vars: - kubeinit_services_systemd_service_name: "{{ kubeinit_apache_service_name }}" - kubeinit_services_podman_container_name: "{{ 
_result_container_info.container.Name }}" - kubeinit_services_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + _param_service_user_dir: "{{ kubeinit_service_user_dir }}" + _param_service_user: "{{ kubeinit_service_user }}" + _param_systemd_service_name: "{{ kubeinit_apache_service_name }}" + _param_podman_container_name: "{{ _result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" - name: Clear temp facts ansible.builtin.set_fact: diff --git a/kubeinit/roles/kubeinit_bind/tasks/main.yml b/kubeinit/roles/kubeinit_bind/tasks/main.yml index a5699de88..f7dcf9191 100644 --- a/kubeinit/roles/kubeinit_bind/tasks/main.yml +++ b/kubeinit/roles/kubeinit_bind/tasks/main.yml @@ -126,9 +126,11 @@ tasks_from: create_managed_service.yml public: true vars: - kubeinit_services_systemd_service_name: "{{ kubeinit_bind_service_name }}" - kubeinit_services_podman_container_name: "{{ _result_container_info.container.Name }}" - kubeinit_services_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + _param_service_user_dir: "{{ kubeinit_service_user_dir }}" + _param_service_user: "{{ kubeinit_service_user }}" + _param_systemd_service_name: "{{ kubeinit_bind_service_name }}" + _param_podman_container_name: "{{ _result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" - name: Clear temp facts ansible.builtin.set_fact: diff --git a/kubeinit/roles/kubeinit_cdk/tasks/main.yml b/kubeinit/roles/kubeinit_cdk/tasks/main.yml index e9deb60d7..c94f1c35c 100644 --- a/kubeinit/roles/kubeinit_cdk/tasks/main.yml +++ b/kubeinit/roles/kubeinit_cdk/tasks/main.yml @@ -68,6 +68,16 @@ changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_provision_service_node }}" +- name: Copy juju client key to cluster nodes + ansible.builtin.shell: | + ssh-copy-id -i ~/.local/share/juju/ssh/juju_id_rsa root@{{ hostvars[item].ansible_host }} + args: + executable: /bin/bash + register: _result + changed_when: "_result.rc == 0" + loop: "{{ groups['all_cluster_nodes'] + groups['all_extra_nodes'] }}" + delegate_to: "{{ kubeinit_provision_service_node }}" + - name: Bootstrap the CDK controller ansible.builtin.shell: | juju bootstrap --no-gui \ diff --git a/kubeinit/roles/kubeinit_dnsmasq/tasks/main.yml b/kubeinit/roles/kubeinit_dnsmasq/tasks/main.yml index e6a144872..d20278676 100644 --- a/kubeinit/roles/kubeinit_dnsmasq/tasks/main.yml +++ b/kubeinit/roles/kubeinit_dnsmasq/tasks/main.yml @@ -102,9 +102,11 @@ tasks_from: create_managed_service.yml public: true vars: - kubeinit_services_systemd_service_name: "{{ kubeinit_dnsmasq_service_name }}" - kubeinit_services_podman_container_name: "{{ _result_container_info.container.Name }}" - kubeinit_services_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + _param_service_user_dir: "{{ kubeinit_service_user_dir }}" + _param_service_user: "{{ kubeinit_service_user }}" + _param_systemd_service_name: "{{ kubeinit_dnsmasq_service_name }}" + _param_podman_container_name: "{{ _result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" - name: Clear temp facts ansible.builtin.set_fact: diff --git a/kubeinit/roles/kubeinit_haproxy/tasks/main.yml b/kubeinit/roles/kubeinit_haproxy/tasks/main.yml index 8f99e6e3c..0221a2d50 100644 --- a/kubeinit/roles/kubeinit_haproxy/tasks/main.yml +++ 
b/kubeinit/roles/kubeinit_haproxy/tasks/main.yml @@ -93,9 +93,11 @@ tasks_from: create_managed_service.yml public: true vars: - kubeinit_services_systemd_service_name: "{{ kubeinit_haproxy_service_name }}" - kubeinit_services_podman_container_name: "{{ _result_container_info.container.Name }}" - kubeinit_services_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + _param_service_user_dir: "{{ kubeinit_service_user_dir }}" + _param_service_user: "{{ kubeinit_service_user }}" + _param_systemd_service_name: "{{ kubeinit_haproxy_service_name }}" + _param_podman_container_name: "{{ _result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" - name: Clear temp facts ansible.builtin.set_fact: diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/create_network.yml b/kubeinit/roles/kubeinit_libvirt/tasks/create_network.yml index a45bab650..67a1b6489 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/create_network.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/create_network.yml @@ -21,12 +21,12 @@ tasks_from: cleanup_hypervisors.yml public: true vars: - hypervisors_cleaned: "{{ kubeinit_cluster_facts_name is defined }}" + hypervisors_cleaned: "{{ kubeinit_facts_name is defined }}" when: not hypervisors_cleaned - block: - name: "Stop before 'task-create-network' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-create-network' @@ -130,6 +130,34 @@ delegate_to: "{{ ovn_host }}" when: hostvars[ovn_host].distribution_family == 'Debian' +# +# Configure firewalld as needed +# +- name: Gather current firewalld rules for ovn hosts + ansible.posix.firewalld_info: + active_zones: true + register: _result_firewalld_info + loop: "{{ groups['all_ovn_hosts'] }}" + loop_control: + loop_var: ovn_host + delegate_to: "{{ ovn_host }}" + when: hostvars[ovn_host].firewalld_is_active + +- name: Set fact for expected rich_rule result + ansible.builtin.set_fact: + rich_rule_result: "rule family=\"ipv4\" source address=\"{{ kubeinit_cluster_network }}\" masquerade" + +- name: Check firewalld services and rich_rules for existing entries + ansible.builtin.add_host: + name: "{{ item.ovn_host }}" + add_ovn_rich_rule: "{{ true if (rich_rule_result not in default_zone_info.rich_rules) else false }}" + add_ovn_central_service: "{{ true if ('ovn-central-firewall-service' not in default_zone_info.services) else false }}" + add_ovn_host_service: "{{ true if ('ovn-host-firewall-service' not in default_zone_info.services) else false }}" + reload_firewalld: "{{ true if ('ovn-host-firewall-service' not in default_zone_info.services) else false }}" + loop: "{{ _result_firewalld_info.results }}" + vars: + default_zone_info: "{{ item.firewalld_info.zones[item.firewalld_info.default_zone] }}" + - name: Refresh firewalld services list to pick up ovn services ansible.builtin.command: | firewall-cmd --reload @@ -139,7 +167,7 @@ loop_control: loop_var: ovn_host delegate_to: "{{ ovn_host }}" - when: hostvars[ovn_host].firewalld_is_active + when: hostvars[ovn_host].reload_firewalld | default(false) - name: Enable OVN central in firewalld ansible.posix.firewalld: @@ -148,7 +176,7 @@ state: enabled immediate: true delegate_to: "{{ kubeinit_ovn_central_host }}" - when: 
hostvars[kubeinit_ovn_central_host].firewalld_is_active + when: hostvars[kubeinit_ovn_central_host].add_ovn_central_service | default(false) - name: Enable OVN NAT in firewalld ansible.posix.firewalld: @@ -157,7 +185,7 @@ state: enabled immediate: true delegate_to: "{{ kubeinit_ovn_central_host }}" - when: hostvars[kubeinit_ovn_central_host].firewalld_is_active + when: hostvars[kubeinit_ovn_central_host].add_ovn_rich_rule | default(false) - name: Enable OVN host in firewalld ansible.posix.firewalld: @@ -169,7 +197,7 @@ loop_control: loop_var: ovn_host delegate_to: "{{ ovn_host }}" - when: hostvars[ovn_host].firewalld_is_active + when: hostvars[ovn_host].add_ovn_host_service | default(false) - name: Refresh firewalld services list ansible.builtin.command: | @@ -180,7 +208,18 @@ loop_control: loop_var: ovn_host delegate_to: "{{ ovn_host }}" - when: hostvars[ovn_host].firewalld_is_active + when: hostvars[ovn_host].reload_firewalld | default(false) + +- name: Reload podman networks + ansible.builtin.command: | + podman network reload --all + register: _result + changed_when: "_result.rc == 0" + loop: "{{ groups['all_ovn_hosts'] }}" + loop_control: + loop_var: ovn_host + delegate_to: "{{ ovn_host }}" + when: hostvars[ovn_host].reload_firewalld | default(false) and hostvars[ovn_host].podman_is_installed | default(false) - name: Only restart if this is the only cluster network block: @@ -521,17 +560,17 @@ - block: - name: Add task-create-network to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ kubeinit_cluster_hostvars.tasks_completed | union(['task-create-network']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-create-network']) }}" - - name: Update kubeinit_cluster_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-create-network' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_cluster_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_centos_guest.yml b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_centos_guest.yml index aaeef1056..c3de6b9a5 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_centos_guest.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_centos_guest.yml @@ -217,4 +217,6 @@ name: kubeinit.kubeinit.kubeinit_prepare tasks_from: gather_host_facts.yml public: yes + vars: + _param_gather_host: "{{ kubeinit_deployment_node_name }}" tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_debian_guest.yml b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_debian_guest.yml index 78bdb9eec..5106fefe6 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_debian_guest.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_debian_guest.yml @@ -241,4 +241,6 @@ name: kubeinit.kubeinit.kubeinit_prepare tasks_from: gather_host_facts.yml public: yes + vars: + _param_gather_host: "{{ 
kubeinit_deployment_node_name }}" tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_ubuntu_guest.yml b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_ubuntu_guest.yml index 8bca498e0..0755077a2 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_ubuntu_guest.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_ubuntu_guest.yml @@ -252,4 +252,6 @@ name: kubeinit.kubeinit.kubeinit_prepare tasks_from: gather_host_facts.yml public: yes + vars: + _param_gather_host: "{{ kubeinit_deployment_node_name }}" tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/download_cloud_images.yml b/kubeinit/roles/kubeinit_libvirt/tasks/download_cloud_images.yml index eeb792c40..ead9d9ea8 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/download_cloud_images.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/download_cloud_images.yml @@ -20,12 +20,12 @@ name: kubeinit.kubeinit.kubeinit_prepare public: true vars: - environment_prepared: "{{ kubeinit_cluster_facts_name is defined }}" + environment_prepared: "{{ kubeinit_facts_name is defined }}" when: not environment_prepared - block: - name: "Stop before 'task-download-images' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-download-images' @@ -96,17 +96,17 @@ - block: - name: Add task-download-images to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ kubeinit_cluster_hostvars.tasks_completed | union(['task-download-images']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-download-images']) }}" - - name: Update kubeinit_cluster_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-download-images' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_cluster_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_nexus/tasks/main.yml b/kubeinit/roles/kubeinit_nexus/tasks/main.yml index 96f24ce48..630ee23f4 100644 --- a/kubeinit/roles/kubeinit_nexus/tasks/main.yml +++ b/kubeinit/roles/kubeinit_nexus/tasks/main.yml @@ -171,9 +171,11 @@ tasks_from: create_managed_service.yml public: true vars: - kubeinit_services_systemd_service_name: "{{ kubeinit_nexus_service_name }}" - kubeinit_services_podman_container_name: "{{ _result_container_info.container.Name }}" - kubeinit_services_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + _param_service_user_dir: "{{ kubeinit_service_user_dir }}" + _param_service_user: "{{ kubeinit_service_user }}" + _param_systemd_service_name: "{{ kubeinit_nexus_service_name }}" + _param_podman_container_name: "{{ 
_result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" - name: Clear temp facts ansible.builtin.set_fact: diff --git a/kubeinit/roles/kubeinit_prepare/tasks/cleanup_hypervisors.yml b/kubeinit/roles/kubeinit_prepare/tasks/cleanup_hypervisors.yml index 1177292d4..3e4e41305 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/cleanup_hypervisors.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/cleanup_hypervisors.yml @@ -20,12 +20,12 @@ name: kubeinit.kubeinit.kubeinit_prepare public: true vars: - environment_prepared: "{{ kubeinit_cluster_facts_name is defined }}" + environment_prepared: "{{ kubeinit_facts_name is defined }}" when: not environment_prepared - block: - name: "Stop before 'task-cleanup-hypervisors' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-cleanup-hypervisors' @@ -339,17 +339,17 @@ - block: - name: Add task-cleanup-hypervisors to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ kubeinit_cluster_hostvars.tasks_completed | union(['task-cleanup-hypervisors']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-cleanup-hypervisors']) }}" - - name: Update kubeinit_cluster_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-cleanup-hypervisors' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_cluster_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_prepare/tasks/deploy_cluster.yml b/kubeinit/roles/kubeinit_prepare/tasks/deploy_cluster.yml index 0c48de5ce..49f4c7412 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/deploy_cluster.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/deploy_cluster.yml @@ -21,12 +21,12 @@ tasks_from: prepare_services.yml public: true vars: - services_prepared: "{{ kubeinit_cluster_facts_name is defined }}" + services_prepared: "{{ kubeinit_facts_name is defined }}" when: not services_prepared - block: - name: "Stop before 'task-deploy-cluster' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-deploy-cluster' @@ -40,17 +40,17 @@ - block: - name: Add task-deploy-cluster to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ kubeinit_cluster_hostvars.tasks_completed | union(['task-deploy-cluster']) }}" + name: "{{ kubeinit_facts_name }}" + 
tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-deploy-cluster']) }}" - - name: Update kubeinit_cluster_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-deploy-cluster' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_cluster_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_prepare/tasks/gather_host_facts.yml b/kubeinit/roles/kubeinit_prepare/tasks/gather_host_facts.yml index 1ec2751b3..e56a9ab69 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/gather_host_facts.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/gather_host_facts.yml @@ -14,7 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. -- name: Delegate to kubeinit_deployment_node_name +- name: Delegate to _param_gather_host block: - name: Gather network facts @@ -59,7 +59,7 @@ - name: Fails if OS is not supported ansible.builtin.fail: - msg: "The host \"{{ hostvars[kubeinit_deployment_node_name].ansible_host }}\" needs to be CentOS/RHEL, Fedora, or Debian/Ubuntu" + msg: "The host \"{{ hostvars[_param_gather_host].ansible_host }}\" needs to be CentOS/RHEL, Fedora, or Debian/Ubuntu" when: not distro_family is defined - name: Gather the package facts @@ -68,7 +68,7 @@ - name: Set podman_installed ansible.builtin.set_fact: - podman_installed: "{{ true if ('podman' in ansible_facts.packages) else false }}" + podman_installed: "{{ true if ('podman' in _result_packages.ansible_facts.packages) else false }}" - name: Gather the services facts ansible.builtin.service_facts: @@ -150,22 +150,22 @@ community.libvirt.virt_net: command: list_nets register: _result_nets - when: libvirtd_active and kubeinit_deployment_node_name in groups['hypervisor_hosts'] + when: libvirtd_active and _param_gather_host in groups['hypervisor_hosts'] - name: Get all the libvirt VMs community.libvirt.virt: command: list_vms register: _result_vms - when: libvirtd_active and kubeinit_deployment_node_name in groups['hypervisor_hosts'] + when: libvirtd_active and _param_gather_host in groups['hypervisor_hosts'] - name: Generate an OpenSSH keypair on hypervisor hosts community.crypto.openssh_keypair: path: "~/.ssh/{{ kubeinit_cluster_name }}_id_{{ kubeinit_common_ssh_keytype }}" type: "{{ kubeinit_common_ssh_keytype }}" - comment: "{{ kubeinit_cluster_name }} {{ kubeinit_deployment_node_name }}" + comment: "{{ kubeinit_cluster_name }} {{ _param_gather_host }}" regenerate: 'never' register: _result_keypair - when: kubeinit_deployment_node_name in (['localhost'] + groups['hypervisor_hosts']) + when: _param_gather_host in (['localhost'] + groups['hypervisor_hosts']) - name: Set ssh_host_key_info ansible.builtin.set_fact: @@ -176,7 +176,7 @@ - name: Add ansible facts to hostvars ansible.builtin.add_host: - name: "{{ kubeinit_deployment_node_name }}" + name: "{{ _param_gather_host }}" ansible_default_ipv4_address: "{{ _result_facts.ansible_facts.ansible_default_ipv4.address | 
default(omit) }}" ansible_hostname: "{{ _result_facts.ansible_facts.ansible_hostname }}" ansible_distribution: "{{ _result_facts.ansible_facts.ansible_distribution }}" @@ -184,7 +184,7 @@ distribution_family: "{{ distro_family }}" ssh_host_key_ecdsa: "{{ ssh_host_key_info | default(omit) }}" libvirt_qemu_user: "{{ qemu_user }}" - os: "{{ hostvars[kubeinit_deployment_node_name].os if (hostvars[kubeinit_deployment_node_name].os is defined) else host_os }}" + os: "{{ hostvars[_param_gather_host].os if (hostvars[_param_gather_host].os is defined) else host_os }}" firewalld_is_active: "{{ firewalld_active }}" podman_is_installed: "{{ podman_installed }}" podman_is_active: "{{ podman_active }}" @@ -192,9 +192,14 @@ libvirt_nets: "{{ _result_nets.list_nets | default([]) }}" libvirt_vms: "{{ _result_vms.list_vms | default([]) }}" public_key: "{{ _result_keypair.public_key | default(omit) }}" - ssh_connection_address: "{{ 'localhost' if (kubeinit_deployment_node_name == 'localhost') else _result_facts.ansible_facts.ansible_env['SSH_CONNECTION'].split(' ')[2] }}" + ssh_connection_address: "{{ 'localhost' if (_param_gather_host == 'localhost') else _result_facts.ansible_facts.ansible_env['SSH_CONNECTION'].split(' ')[2] }}" runtime_path: "{{ _result_facts.ansible_facts.ansible_env['XDG_RUNTIME_DIR'] | default('') | string }}" + - name: Update kubeinit_facts_hostvars + ansible.builtin.set_fact: + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" + when: kubeinit_facts_hostvars is defined + - name: Update kubeinit_cluster_hostvars ansible.builtin.set_fact: kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" @@ -208,4 +213,4 @@ _result_vms: null _result_keypair: null - delegate_to: "{{ kubeinit_deployment_node_name }}" + delegate_to: "{{ _param_gather_host }}" diff --git a/kubeinit/roles/kubeinit_prepare/tasks/gather_kubeinit_facts.yml b/kubeinit/roles/kubeinit_prepare/tasks/gather_kubeinit_facts.yml index 22fc0514b..5e52d3774 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/gather_kubeinit_facts.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/gather_kubeinit_facts.yml @@ -60,9 +60,13 @@ - kubeinit_spec_hypervisors|int >= 0 - kubeinit_spec_hypervisors|int <= (groups['hypervisor_hosts'] | length) +- name: Set hostname we use to set kubeinit facts + ansible.builtin.set_fact: + kubeinit_facts_name: 'kubeinit-facts' + - name: Set hostname we use to set cluster facts ansible.builtin.set_fact: - kubeinit_cluster_facts_name: 'kubeinit-facts' + kubeinit_cluster_facts_name: 'kubeinit-cluster-facts' - name: Add kubeinit_spec facts to cluster facts ansible.builtin.add_host: @@ -216,7 +220,7 @@ ansible.builtin.include_tasks: gather_host_facts.yml loop: "{{ hostvars[kubeinit_cluster_facts_name].hypervisors | union(['localhost']) }}" loop_control: - loop_var: kubeinit_deployment_node_name + loop_var: _param_gather_host - name: Determine the hypervisor target for all inventory nodes ansible.builtin.set_fact: @@ -312,13 +316,13 @@ - block: - name: Add tasks-gather-facts to tasks completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" + name: "{{ kubeinit_facts_name }}" tasks_completed: "{{ ['task-gather-facts'] }}" - block: - name: Stop after 'task-gather-facts' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and 
kubeinit_stop_after_task in hostvars[kubeinit_cluster_facts_name].tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in hostvars[kubeinit_facts_name].tasks_completed tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_prepare/tasks/main.yml b/kubeinit/roles/kubeinit_prepare/tasks/main.yml index b128910f1..afbbef8bb 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/main.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/main.yml @@ -18,29 +18,33 @@ - name: Gather hypervisor facts if needed ansible.builtin.include_tasks: prepare_hypervisors.yml vars: - hypervisors_prepared: "{{ kubeinit_cluster_facts_name is defined }}" + hypervisors_prepared: "{{ kubeinit_facts_name is defined }}" when: not hypervisors_prepared - name: Add task-prepare-hypervisors to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ hostvars[kubeinit_cluster_facts_name].tasks_completed | union(['task-prepare-hypervisors']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ hostvars[kubeinit_facts_name].tasks_completed | union(['task-prepare-hypervisors']) }}" - block: - name: Stop after 'task-prepare-hypervisors' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in hostvars[kubeinit_cluster_facts_name].tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in hostvars[kubeinit_facts_name].tasks_completed - block: - name: "Stop before 'task-prepare-environment' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-prepare-environment' tags: omit_from_grapher +- name: Define kubeinit_facts_hostvars + ansible.builtin.set_fact: + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" + - name: Define cluster fact names needed to prepare groups ansible.builtin.set_fact: kubeinit_cluster_distro: "{{ hostvars[kubeinit_cluster_facts_name].distro }}" @@ -99,7 +103,7 @@ - name: Gather facts from bastion_host if needed ansible.builtin.include_tasks: gather_host_facts.yml vars: - kubeinit_deployment_node_name: "{{ kubeinit_bastion_host }}" + _param_gather_host: "{{ kubeinit_bastion_host }}" tags: omit_from_grapher - name: Generate an OpenSSH keypair bastion host @@ -111,6 +115,11 @@ register: _result_cluster_keypair delegate_to: "{{ kubeinit_bastion_host }}" + - name: Set public_key hostvar for bastion host + ansible.builtin.add_host: + name: "{{ kubeinit_bastion_host }}" + public_key: "{{ _result_cluster_keypair.public_key }}" + - name: Add bastion host public key to cluster authorized_keys ansible.builtin.set_fact: authorized_keys_with_bastion: "{{ kubeinit_cluster_hostvars.authorized_keys | union([_result_cluster_keypair.public_key]) }}" @@ -173,17 +182,17 @@ - block: - name: Add task-prepare-environment to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ kubeinit_cluster_hostvars.tasks_completed | union(['task-prepare-environment']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: 
"{{ kubeinit_facts_hostvars.tasks_completed | union(['task-prepare-environment']) }}" - - name: Update kubeinit_cluster_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-prepare-environment' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_cluster_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_prepare/tasks/post_deployment.yml b/kubeinit/roles/kubeinit_prepare/tasks/post_deployment.yml index a8029dd86..7088723ea 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/post_deployment.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/post_deployment.yml @@ -21,12 +21,12 @@ tasks_from: prepare_services.yml public: true vars: - services_prepared: "{{ kubeinit_cluster_facts_name is defined }}" + services_prepared: "{{ kubeinit_facts_name is defined }}" when: not services_prepared - block: - name: "Stop before 'task-post-deployment' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-post-deployment' @@ -41,17 +41,17 @@ - block: - name: Add task-post-deployment to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ kubeinit_cluster_hostvars.tasks_completed | union(['task-post-deployment']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-post-deployment']) }}" - - name: Update kubeinit_cluster_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-post-deployment' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_cluster_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_prepare/tasks/prepare_cluster.yml b/kubeinit/roles/kubeinit_prepare/tasks/prepare_cluster.yml index c70550360..cdf240baf 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/prepare_cluster.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/prepare_cluster.yml @@ -21,12 +21,12 @@ tasks_from: prepare_services.yml public: true vars: - services_prepared: "{{ kubeinit_cluster_facts_name is defined }}" + services_prepared: "{{ kubeinit_facts_name is defined }}" when: not services_prepared - block: - name: "Stop before 
'task-prepare-cluster' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-prepare-cluster' @@ -41,17 +41,17 @@ - block: - name: Add task-prepare-cluster to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ kubeinit_cluster_hostvars.tasks_completed | union(['task-prepare-cluster']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-prepare-cluster']) }}" - - name: Update kubeinit_cluster_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-prepare-cluster' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_cluster_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_prepare/tasks/prepare_groups.yml b/kubeinit/roles/kubeinit_prepare/tasks/prepare_groups.yml index ce399f08c..f09547108 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/prepare_groups.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/prepare_groups.yml @@ -26,7 +26,7 @@ fqdn: "{{ item }}.{{ kubeinit_cluster_fqdn }}" target: "{{ hostvars[item].target }}" os: "{{ hostvars[item].os }}" - loop: "{{ groups['controller_nodes'][0:hostvars['kubeinit-facts'].controller_count|int] | default([]) | list }}" + loop: "{{ groups['controller_nodes'][0:hostvars['kubeinit-cluster-facts'].controller_count|int] | default([]) | list }}" - name: Add all compute nodes to the all_compute_nodes group ansible.builtin.add_host: @@ -36,7 +36,7 @@ fqdn: "{{ item }}.{{ kubeinit_cluster_fqdn }}" target: "{{ hostvars[item].target }}" os: "{{ hostvars[item].os }}" - loop: "{{ groups['compute_nodes'][0:hostvars['kubeinit-facts'].compute_count|int] | default([]) | list }}" + loop: "{{ groups['compute_nodes'][0:hostvars['kubeinit-cluster-facts'].compute_count|int] | default([]) | list }}" - name: Add all controller and compute nodes to the all_cluster_nodes group ansible.builtin.add_host: diff --git a/kubeinit/roles/kubeinit_prepare/tasks/prepare_hypervisors.yml b/kubeinit/roles/kubeinit_prepare/tasks/prepare_hypervisors.yml index 7212703ab..5d978ac2a 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/prepare_hypervisors.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/prepare_hypervisors.yml @@ -18,12 +18,12 @@ - name: Gather kubeinit facts if needed ansible.builtin.include_tasks: gather_kubeinit_facts.yml vars: - facts_prepared: "{{ kubeinit_cluster_facts_name is defined }}" + facts_prepared: "{{ kubeinit_facts_name is defined }}" when: not facts_prepared - block: - name: "Stop before 'task-prepare-hypervisors' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ 
kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-prepare-hypervisors' diff --git a/kubeinit/roles/kubeinit_registry/tasks/main.yml b/kubeinit/roles/kubeinit_registry/tasks/main.yml index 964be737f..267c55690 100644 --- a/kubeinit/roles/kubeinit_registry/tasks/main.yml +++ b/kubeinit/roles/kubeinit_registry/tasks/main.yml @@ -85,9 +85,11 @@ tasks_from: create_managed_service.yml public: true vars: - kubeinit_services_systemd_service_name: "{{ kubeinit_registry_service_name }}" - kubeinit_services_podman_container_name: "{{ _result_container_info.container.Name }}" - kubeinit_services_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + _param_service_user_dir: "{{ kubeinit_service_user_dir }}" + _param_service_user: "{{ kubeinit_service_user }}" + _param_systemd_service_name: "{{ kubeinit_registry_service_name }}" + _param_podman_container_name: "{{ _result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" - name: Clear temp facts ansible.builtin.set_fact: diff --git a/kubeinit/roles/kubeinit_services/tasks/00_create_service_pod.yml b/kubeinit/roles/kubeinit_services/tasks/00_create_service_pod.yml index 5b00a8c6c..627da2478 100644 --- a/kubeinit/roles/kubeinit_services/tasks/00_create_service_pod.yml +++ b/kubeinit/roles/kubeinit_services/tasks/00_create_service_pod.yml @@ -70,19 +70,40 @@ ansible.builtin.command: | ssh -i ~/.ssh/{{ kubeinit_cluster_name }}_id_{{ kubeinit_common_ssh_keytype }} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=accept-new -M -S "~/.ssh/cm-%r@%h:%p" -N -f -L "{{ kubeinit_bastion_host_address }}:{{ podman_remote_ssh_port }}:{{ podman_remote_ssh_host }}:22" "{{ podman_intermediate_ssh_host }}" + - name: Gather current firewall rules + ansible.posix.firewalld_info: + active_zones: true + register: _result_firewalld_info + when: hostvars[kubeinit_bastion_host].firewalld_is_active + + - name: Check firewalld ports for existing entries + ansible.builtin.add_host: + name: "{{ kubeinit_bastion_host }}" + add_bastion_ssh_tunnel: "{{ true if (['6222', 'tcp'] not in default_zone_info.ports) else false }}" + reload_firewalld: "{{ true if (['6222', 'tcp'] not in default_zone_info.ports) else false }}" + vars: + default_zone_info: "{{ _result_firewalld_info.firewalld_info.zones[_result_firewalld_info.firewalld_info.default_zone] }}" + - name: Open firewall port 6222 on bastion ansible.posix.firewalld: port: 6222/tcp state: enabled permanent: true - when: hostvars[kubeinit_bastion_host].firewalld_is_active + when: hostvars[kubeinit_bastion_host].add_bastion_ssh_tunnel | default(false) - name: Reload firewalld service - ansible.builtin.shell: | + ansible.builtin.command: | firewall-cmd --reload - args: - executable: /bin/bash - when: hostvars[kubeinit_bastion_host].firewalld_is_active + register: _result + changed_when: "_result.rc == 0" + when: hostvars[kubeinit_bastion_host].reload_firewalld | default(false) + + - name: Reload podman networks + ansible.builtin.command: | + podman network reload --all + register: _result + changed_when: "_result.rc == 0" + when: hostvars[kubeinit_bastion_host].reload_firewalld | default(false) and hostvars[kubeinit_bastion_host].podman_is_installed | default(false) - name: Create route to cluster network on bastion host ansible.builtin.shell: | @@ -91,6 +112,8 @@ ip route add {{ kubeinit_cluster_network 
}} via {{ hostvars[kubeinit_ovn_central_host].ssh_connection_address }} args: executable: /bin/bash + register: _result + changed_when: "_result.rc == 0" when: false and kubeinit_ovn_central_host not in kubeinit_bastion_host delegate_to: "{{ kubeinit_bastion_host }}" diff --git a/kubeinit/roles/kubeinit_services/tasks/create_managed_service.yml b/kubeinit/roles/kubeinit_services/tasks/create_managed_service.yml index 888b3959e..f9260c7f2 100644 --- a/kubeinit/roles/kubeinit_services/tasks/create_managed_service.yml +++ b/kubeinit/roles/kubeinit_services/tasks/create_managed_service.yml @@ -17,36 +17,36 @@ - name: Ensure user specific systemd instance are persistent ansible.builtin.command: | - /usr/bin/loginctl enable-linger {{ kubeinit_service_user }} + /usr/bin/loginctl enable-linger {{ _param_service_user }} register: _result changed_when: "_result.rc == 0" - name: Create systemd user directory ansible.builtin.file: - path: "{{ kubeinit_service_user_dir }}/.config/systemd/user" + path: "{{ _param_service_user_dir }}/.config/systemd/user" state: directory - owner: "{{ kubeinit_service_user }}" - group: "{{ kubeinit_service_user }}" + owner: "{{ _param_service_user }}" + group: "{{ _param_service_user }}" mode: '0775' - name: Copy the podman systemd service file ansible.builtin.copy: content: | [Unit] - Description=Podman {{ kubeinit_services_systemd_service_name }}.service + Description=Podman {{ _param_systemd_service_name }}.service [Service] Restart=on-failure - ExecStart=/usr/bin/podman start {{ kubeinit_services_podman_container_name }} - ExecStop=/usr/bin/podman stop -t 10 {{ kubeinit_services_podman_container_name }} + ExecStart=/usr/bin/podman start {{ _param_podman_container_name }} + ExecStop=/usr/bin/podman stop -t 10 {{ _param_podman_container_name }} SuccessExitStatus=143 KillMode=none Type=forking - PIDFile={{ kubeinit_services_podman_container_pidfile }} + PIDFile={{ _param_podman_container_pidfile }} [Install] WantedBy=default.target - dest: "{{ kubeinit_service_user_dir }}/.config/systemd/user/{{ kubeinit_services_systemd_service_name }}.service" - owner: "{{ kubeinit_service_user }}" - group: "{{ kubeinit_service_user }}" + dest: "{{ _param_service_user_dir }}/.config/systemd/user/{{ _param_systemd_service_name }}.service" + owner: "{{ _param_service_user }}" + group: "{{ _param_service_user }}" mode: '0644' - name: Reload systemd service @@ -56,17 +56,17 @@ environment: DBUS_SESSION_BUS_ADDRESS: "{{ ansible_env.DBUS_SESSION_BUS_ADDRESS|default('unix:path=/run/user/' + ansible_effective_user_id|string + '/bus') }}" -- name: Enable {{ kubeinit_services_systemd_service_name }}.service +- name: Enable {{ _param_systemd_service_name }}.service ansible.builtin.systemd: - name: "{{ kubeinit_services_systemd_service_name }}" + name: "{{ _param_systemd_service_name }}" enabled: yes scope: user environment: DBUS_SESSION_BUS_ADDRESS: "{{ ansible_env.DBUS_SESSION_BUS_ADDRESS|default('unix:path=/run/user/' + ansible_effective_user_id|string + '/bus') }}" -- name: Start {{ kubeinit_services_systemd_service_name }}.service +- name: Start {{ _param_systemd_service_name }}.service ansible.builtin.systemd: - name: "{{ kubeinit_services_systemd_service_name }}" + name: "{{ _param_systemd_service_name }}" state: started scope: user environment: diff --git a/kubeinit/roles/kubeinit_services/tasks/create_provision_container.yml b/kubeinit/roles/kubeinit_services/tasks/create_provision_container.yml index be0f10bff..8206f34bb 100644 --- 
a/kubeinit/roles/kubeinit_services/tasks/create_provision_container.yml +++ b/kubeinit/roles/kubeinit_services/tasks/create_provision_container.yml @@ -148,9 +148,11 @@ tasks_from: create_managed_service.yml public: true vars: - kubeinit_services_systemd_service_name: "{{ kubeinit_provision_service_name }}" - kubeinit_services_podman_container_name: "{{ _result_container_info.container.Name }}" - kubeinit_services_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + _param_service_user_dir: "{{ kubeinit_service_user_dir }}" + _param_service_user: "{{ kubeinit_service_user }}" + _param_systemd_service_name: "{{ kubeinit_provision_service_name }}" + _param_podman_container_name: "{{ _result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" delegate_to: "{{ kubeinit_deployment_delegate }}" @@ -346,6 +348,11 @@ ansible_ssh_pipelining: False delegate_to: "{{ kubeinit_provision_service_name }}" +- name: Set public_key hostvar for provision service + ansible.builtin.add_host: + name: "{{ kubeinit_provision_service_node }}" + public_key: "{{ _result_provision_service_keypair.public_key }}" + - name: Add provision service public key to cluster authorized_keys ansible.builtin.set_fact: authorized_keys_with_provision: "{{ kubeinit_cluster_hostvars.authorized_keys | union([_result_provision_service_keypair.public_key]) }}" @@ -388,9 +395,11 @@ delegate_to: "{{ kubeinit_bastion_host }}" - block: - - name: Gather network and host facts for guest + - name: Gather network and host facts for provision service ansible.builtin.include_role: name: kubeinit.kubeinit.kubeinit_prepare tasks_from: gather_host_facts.yml public: yes + vars: + _param_gather_host: "{{ kubeinit_deployment_node_name }}" tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_services/tasks/main.yml b/kubeinit/roles/kubeinit_services/tasks/main.yml index bbc4c47f5..836e83510 100644 --- a/kubeinit/roles/kubeinit_services/tasks/main.yml +++ b/kubeinit/roles/kubeinit_services/tasks/main.yml @@ -21,12 +21,12 @@ tasks_from: create_network.yml public: true vars: - network_created: "{{ kubeinit_cluster_facts_name is defined }}" + network_created: "{{ kubeinit_facts_name is defined }}" when: not network_created - block: - name: "Stop before 'task-create-services' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-create-services' @@ -61,17 +61,17 @@ - block: - name: Add task-create-services to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ kubeinit_cluster_hostvars.tasks_completed | union(['task-create-services']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-create-services']) }}" - - name: Update kubeinit_cluster_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-create-services' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ 
kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_cluster_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/kubeinit/roles/kubeinit_services/tasks/prepare_services.yml b/kubeinit/roles/kubeinit_services/tasks/prepare_services.yml index 4acade89a..13b5a78e0 100644 --- a/kubeinit/roles/kubeinit_services/tasks/prepare_services.yml +++ b/kubeinit/roles/kubeinit_services/tasks/prepare_services.yml @@ -61,4 +61,4 @@ tasks_from: gather_host_facts.yml public: yes vars: - kubeinit_deployment_node_name: "{{ kubeinit_provision_service_node }}" + _param_gather_host: "{{ kubeinit_provision_service_node }}" diff --git a/kubeinit/roles/kubeinit_validations/tasks/main.yml b/kubeinit/roles/kubeinit_validations/tasks/main.yml index c2484802f..376c9e34f 100644 --- a/kubeinit/roles/kubeinit_validations/tasks/main.yml +++ b/kubeinit/roles/kubeinit_validations/tasks/main.yml @@ -21,12 +21,12 @@ tasks_from: cleanup_hypervisors.yml public: true vars: - hypervisors_cleaned: "{{ kubeinit_cluster_facts_name is defined }}" + hypervisors_cleaned: "{{ kubeinit_facts_name is defined }}" when: not hypervisors_cleaned - block: - name: "Stop before 'task-run-validations' when requested" - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-run-validations' @@ -48,17 +48,17 @@ - block: - name: Add task-run-validations to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_cluster_facts_name }}" - tasks_completed: "{{ kubeinit_cluster_hostvars.tasks_completed | union(['task-run-validations']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-run-validations']) }}" - - name: Update kubeinit_cluster_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_cluster_hostvars: "{{ hostvars[kubeinit_cluster_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-run-validations' when requested - ansible.builtin.add_host: name="{{ kubeinit_cluster_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_cluster_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/setup/inventory b/setup/inventory index 4de059bf1..e551e51cd 100644 --- a/setup/inventory +++ b/setup/inventory @@ -16,8 +16,8 @@ ansible_ssh_common_args='-o UserKnownHostsFile=/dev/null -o StrictHostKeyCheckin # Inventory variables # -kubeinit_setup_inventory_remote_user=root -kubeinit_setup_inventory_domain=kubeinit.local +kubeinit_inventory_remote_user=root +kubeinit_inventory_setup_domain=kubeinit.local # # Hypervisor host definitions diff --git a/setup/roles/kubeinit_setup/tasks/cleanup_deployment.yml b/setup/roles/kubeinit_setup/tasks/cleanup_deployment.yml index 
dd8ef16c1..4f681902f 100644 --- a/setup/roles/kubeinit_setup/tasks/cleanup_deployment.yml +++ b/setup/roles/kubeinit_setup/tasks/cleanup_deployment.yml @@ -18,12 +18,12 @@ - name: Prepare environment if needed ansible.builtin.include_tasks: prepare_environment.yml vars: - environment_prepared: "{{ kubeinit_setup_facts_name is defined }}" + environment_prepared: "{{ kubeinit_facts_name is defined }}" when: not environment_prepared - block: - name: "Stop before 'task-cleanup-deployment' when requested" - ansible.builtin.add_host: name="{{ kubeinit_setup_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-cleanup-deployment' @@ -36,17 +36,17 @@ - block: - name: Add task-cleanup-deployment to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_setup_facts_name }}" - tasks_completed: "{{ kubeinit_setup_hostvars.tasks_completed | union(['task-cleanup-deployment']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-cleanup-deployment']) }}" - - name: Update kubeinit_setup_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_setup_hostvars: "{{ hostvars[kubeinit_setup_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-cleanup-deployment' when requested - ansible.builtin.add_host: name="{{ kubeinit_setup_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_setup_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/setup/roles/kubeinit_setup/tasks/gather_host_facts.yml b/setup/roles/kubeinit_setup/tasks/gather_host_facts.yml index 9670a14a5..d35b2e23e 100644 --- a/setup/roles/kubeinit_setup/tasks/gather_host_facts.yml +++ b/setup/roles/kubeinit_setup/tasks/gather_host_facts.yml @@ -14,7 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-- name: Delegate to kubeinit_setup_gather_host +- name: Delegate to _param_gather_host block: - name: Gather network facts @@ -54,7 +54,7 @@ - name: Fails if OS is not supported ansible.builtin.fail: - msg: "The host \"{{ hostvars[kubeinit_setup_gather_host].ansible_host }}\" needs to be CentOS/RHEL, Fedora, or Debian/Ubuntu" + msg: "The host \"{{ hostvars[_param_gather_host].ansible_host }}\" needs to be CentOS/RHEL, Fedora, or Debian/Ubuntu" when: not distro_family is defined - name: Gather the package facts @@ -114,26 +114,26 @@ - name: Add ansible facts to hostvars ansible.builtin.add_host: - name: "{{ kubeinit_setup_gather_host }}" + name: "{{ _param_gather_host }}" ansible_default_ipv4_address: "{{ _result_facts.ansible_facts.ansible_default_ipv4.address | default(omit) }}" ansible_hostname: "{{ _result_facts.ansible_facts.ansible_hostname }}" ansible_distribution: "{{ _result_facts.ansible_facts.ansible_distribution }}" ansible_distribution_major_version: "{{ _result_facts.ansible_facts.ansible_distribution_major_version }}" distribution_family: "{{ distro_family }}" ssh_host_key_ecdsa: "{{ ssh_host_key_info | default(omit) }}" - os: "{{ hostvars[kubeinit_setup_gather_host].os if (hostvars[kubeinit_setup_gather_host].os is defined) else host_os }}" + os: "{{ hostvars[_param_gather_host].os if (hostvars[_param_gather_host].os is defined) else host_os }}" firewalld_is_active: "{{ firewalld_active }}" podman_is_installed: "{{ podman_installed }}" podman_is_active: "{{ podman_active }}" remote_path: "{{ _result_facts.ansible_facts.ansible_env['PATH'] }}" remote_home: "{{ _result_facts.ansible_facts.ansible_env['HOME'] }}" - ssh_connection_address: "{{ 'localhost' if (kubeinit_setup_gather_host == 'localhost') else _result_facts.ansible_facts.ansible_env['SSH_CONNECTION'].split(' ')[2] }}" + ssh_connection_address: "{{ 'localhost' if (_param_gather_host == 'localhost') else _result_facts.ansible_facts.ansible_env['SSH_CONNECTION'].split(' ')[2] }}" runtime_path: "{{ _result_facts.ansible_facts.ansible_env['XDG_RUNTIME_DIR'] | default('') | string }}" - - name: Update kubeinit_setup_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_setup_hostvars: "{{ hostvars[kubeinit_setup_facts_name] }}" - when: kubeinit_setup_hostvars is defined + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" + when: kubeinit_facts_hostvars is defined - name: Clear results ansible.builtin.set_fact: @@ -141,4 +141,4 @@ _result_packages: null _result_services: null - delegate_to: "{{ kubeinit_setup_gather_host }}" + delegate_to: "{{ _param_gather_host }}" diff --git a/setup/roles/kubeinit_setup/tasks/gather_setup_facts.yml b/setup/roles/kubeinit_setup/tasks/gather_setup_facts.yml index a55ef544b..a578d6623 100644 --- a/setup/roles/kubeinit_setup/tasks/gather_setup_facts.yml +++ b/setup/roles/kubeinit_setup/tasks/gather_setup_facts.yml @@ -34,28 +34,23 @@ - name: Set hostname we use to store setup facts ansible.builtin.set_fact: - kubeinit_setup_facts_name: 'kubeinit-setup-facts' + kubeinit_facts_name: 'kubeinit-setup-facts' -- name: Set remote user fact from inventory - ansible.builtin.set_fact: - kubeinit_setup_remote_user: "{{ kubeinit_setup_inventory_remote_user | default('root') }}" - -- name: Add group facts to setup facts +- name: Add remote_user fact ansible.builtin.add_host: - name: "{{ kubeinit_setup_facts_name }}" - remote_user: "{{ kubeinit_setup_remote_user }}" - hypervisors: "{{ groups['hypervisor_hosts'] }}" + name: "{{ kubeinit_facts_name }}" + 
remote_user: "{{ kubeinit_inventory_remote_user | default('root') }}" - block: - name: Add tasks-gather-facts to tasks completed ansible.builtin.add_host: - name: "{{ kubeinit_setup_facts_name }}" + name: "{{ kubeinit_facts_name }}" tasks_completed: "{{ ['task-gather-facts'] }}" - block: - name: Stop after 'task-gather-facts' when requested - ansible.builtin.add_host: name="{{ kubeinit_setup_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in hostvars[kubeinit_setup_facts_name].tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in hostvars[kubeinit_facts_name].tasks_completed tags: omit_from_grapher diff --git a/setup/roles/kubeinit_setup/tasks/main.yml b/setup/roles/kubeinit_setup/tasks/main.yml index 9eb5462b6..55b49a519 100644 --- a/setup/roles/kubeinit_setup/tasks/main.yml +++ b/setup/roles/kubeinit_setup/tasks/main.yml @@ -18,12 +18,12 @@ - name: Prepare environment if needed ansible.builtin.include_tasks: prepare_environment.yml vars: - environment_prepared: "{{ kubeinit_setup_facts_name is defined }}" + environment_prepared: "{{ kubeinit_facts_name is defined }}" when: not environment_prepared - block: - name: "Stop before 'task-deploy-setup' when requested" - ansible.builtin.add_host: name="{{ kubeinit_setup_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-deploy-setup' @@ -156,17 +156,17 @@ - block: - name: Add task-deploy-setup to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_setup_facts_name }}" - tasks_completed: "{{ kubeinit_setup_hostvars.tasks_completed | union(['task-deploy-setup']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-deploy-setup']) }}" - - name: Update kubeinit_setup_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_setup_hostvars: "{{ hostvars[kubeinit_setup_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-deploy-setup' when requested - ansible.builtin.add_host: name="{{ kubeinit_setup_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_setup_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher diff --git a/setup/roles/kubeinit_setup/tasks/prepare_environment.yml b/setup/roles/kubeinit_setup/tasks/prepare_environment.yml index a130e9f46..cc9b248b1 100644 --- a/setup/roles/kubeinit_setup/tasks/prepare_environment.yml +++ b/setup/roles/kubeinit_setup/tasks/prepare_environment.yml @@ -18,24 +18,23 @@ - name: Gather kubeinit setup facts if needed ansible.builtin.include_tasks: gather_setup_facts.yml vars: - facts_prepared: "{{ kubeinit_setup_facts_name is defined }}" + facts_prepared: "{{ kubeinit_facts_name is defined }}" when: not facts_prepared - block: - name: "Stop before 'task-prepare-environment' when requested" - 
ansible.builtin.add_host: name="{{ kubeinit_setup_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true - name: End play ansible.builtin.meta: end_play when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-prepare-environment' tags: omit_from_grapher -- name: Define kubeinit_setup_hostvars +- name: Define kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_setup_hostvars: "{{ hostvars[kubeinit_setup_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - name: Define setup fact names needed to prepare groups ansible.builtin.set_fact: - kubeinit_setup_hypervisor_count: "{{ kubeinit_setup_hostvars.hypervisors | length }}" kubeinit_setup_host_name: "{{ 'kubeinit-setup' if (('setup_host' not in groups) or ((groups['setup_host'] | list | length) == 0)) else groups['setup_host'][0] }}" @@ -50,8 +49,8 @@ ansible.builtin.add_host: name: "{{ kubeinit_setup_host_name }}" group: setup_host - remote_user: "{{ kubeinit_setup_hostvars.remote_user }}" - ansible_ssh_user: "{{ kubeinit_setup_hostvars.remote_user }}" + remote_user: "{{ kubeinit_facts_hostvars.remote_user }}" + ansible_ssh_user: "{{ kubeinit_facts_hostvars.remote_user }}" ansible_ssh_extra_args: "-i ~/.ssh/id_{{ kubeinit_common_ssh_keytype }}" - name: Add ansible_host for setup_host if not defined @@ -80,13 +79,13 @@ vars: ansible_ssh_user: "{{ hostvars[groups['setup_host'][0]].remote_user }}" ansible_ssh_extra_args: "-i ~/.ssh/id_{{ kubeinit_common_ssh_keytype }}" - kubeinit_setup_gather_host: "{{ kubeinit_setup_host_name }}" + _param_gather_host: "{{ kubeinit_setup_host_name }}" - name: Define additional host facts ansible.builtin.set_fact: - kubeinit_setup_host_fqdn: "{{ kubeinit_setup_host_name }}.{{ kubeinit_setup_inventory_domain }}" + kubeinit_setup_host_fqdn: "{{ kubeinit_setup_host_name }}.{{ kubeinit_inventory_setup_domain }}" kubeinit_setup_host_address: "{{ hostvars[kubeinit_setup_host_name].ssh_connection_address }}" - kubeinit_setup_host_user: "{{ kubeinit_setup_hostvars.remote_user }}" + kubeinit_setup_host_user: "{{ kubeinit_facts_hostvars.remote_user }}" kubeinit_setup_keypair_path: "~/.ssh/kubeinit_setup_id_{{ kubeinit_common_ssh_keytype }}" - name: Confirm presence of podman and git packages @@ -108,26 +107,26 @@ - name: Gather hypervisor host facts ansible.builtin.include_tasks: gather_host_facts.yml - loop: "{{ kubeinit_setup_hostvars.hypervisors }}" + loop: "{{ groups['hypervisor_hosts'] }}" loop_control: - loop_var: kubeinit_setup_gather_host + loop_var: _param_gather_host vars: ansible_ssh_extra_args: "-i ~/.ssh/id_{{ kubeinit_common_ssh_keytype }}" - block: - name: Add task-prepare-environment to tasks_completed ansible.builtin.add_host: - name: "{{ kubeinit_setup_facts_name }}" - tasks_completed: "{{ kubeinit_setup_hostvars.tasks_completed | union(['task-prepare-environment']) }}" + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-prepare-environment']) }}" - - name: Update kubeinit_setup_hostvars + - name: Update kubeinit_facts_hostvars ansible.builtin.set_fact: - kubeinit_setup_hostvars: "{{ hostvars[kubeinit_setup_facts_name] }}" + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" - block: - name: Stop after 'task-prepare-environment' when requested - ansible.builtin.add_host: name="{{ kubeinit_setup_facts_name }}" playbook_terminated=true + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" 
playbook_terminated=true - name: End play ansible.builtin.meta: end_play - when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_setup_hostvars.tasks_completed + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed tags: omit_from_grapher From 99c871ee40d22124ffd353e1c7b3d3a8ce5f4e23 Mon Sep 17 00:00:00 2001 From: Glenn Marcy Date: Wed, 22 Dec 2021 23:58:01 -0500 Subject: [PATCH 2/5] ci: initial support for gitlab merge requests --- .gitlab-ci.yml | 10 + ci/ansible/group_vars/all.yml | 11 + ci/ansible/inventory | 48 ++ ci/ansible/playbook.yml | 65 ++ .../kubeinit_ci/tasks/cleanup_deployment.yml | 206 +++++ .../tasks/create_managed_service.yml | 73 ++ .../kubeinit_ci/tasks/gather_ci_facts.yml | 56 ++ .../kubeinit_ci/tasks/gather_host_facts.yml | 144 ++++ ci/ansible/roles/kubeinit_ci/tasks/main.yml | 782 ++++++++++++++++++ .../kubeinit_ci/tasks/prepare_environment.yml | 133 +++ .../kubeinit_ci/templates/config.toml.j2 | 5 + .../templates/echo-private-token.sh.j2 | 3 + .../templates/python-gitlab.cfg.j2 | 6 + .../roles/kubeinit_ci/templates/ssh-config.j2 | 5 + ci/gitlab_merge_request.sh | 211 +++++ ci/sanity.sh | 4 +- test-requirements.txt | 1 + tox.ini | 8 +- 18 files changed, 1766 insertions(+), 5 deletions(-) create mode 100644 ci/ansible/group_vars/all.yml create mode 100644 ci/ansible/inventory create mode 100644 ci/ansible/playbook.yml create mode 100644 ci/ansible/roles/kubeinit_ci/tasks/cleanup_deployment.yml create mode 100644 ci/ansible/roles/kubeinit_ci/tasks/create_managed_service.yml create mode 100644 ci/ansible/roles/kubeinit_ci/tasks/gather_ci_facts.yml create mode 100644 ci/ansible/roles/kubeinit_ci/tasks/gather_host_facts.yml create mode 100644 ci/ansible/roles/kubeinit_ci/tasks/main.yml create mode 100644 ci/ansible/roles/kubeinit_ci/tasks/prepare_environment.yml create mode 100644 ci/ansible/roles/kubeinit_ci/templates/config.toml.j2 create mode 100644 ci/ansible/roles/kubeinit_ci/templates/echo-private-token.sh.j2 create mode 100644 ci/ansible/roles/kubeinit_ci/templates/python-gitlab.cfg.j2 create mode 100644 ci/ansible/roles/kubeinit_ci/templates/ssh-config.j2 create mode 100755 ci/gitlab_merge_request.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d02f21e59..c90b2ea73 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -25,3 +25,13 @@ run-multinode: only: variables: - $CLUSTER_TYPE == "multinode" + +merge-request: + stage: test + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' + tags: + - kubeinit-merge-request + script: + - echo "bash -x ./ci/gitlab_merge_request.sh" + - bash -x ./ci/gitlab_merge_request.sh diff --git a/ci/ansible/group_vars/all.yml b/ci/ansible/group_vars/all.yml new file mode 100644 index 000000000..df27f1dc2 --- /dev/null +++ b/ci/ansible/group_vars/all.yml @@ -0,0 +1,11 @@ +--- +# Docker hub login variables +kubeinit_common_docker_username: "{{ lookup('env','KUBEINIT_COMMON_DOCKER_USERNAME') | default ('') }}" +kubeinit_common_docker_password: "{{ lookup('env','KUBEINIT_COMMON_DOCKER_PASSWORD') | default ('') }}" + +kubeinit_common_dns_public: "{{ lookup('env','KUBEINIT_COMMON_DNS_PUBLIC') or '1.1.1.1' }}" + +kubeinit_common_ssh_keytype: "{{ lookup('env','KUBEINIT_COMMON_SSH_KEYTYPE') or 'rsa' }}" + +kubeinit_common_gitlab_runner_registration_token: "{{ lookup('env','KUBEINIT_COMMON_GITLAB_RUNNER_REGISTRATION_TOKEN') | default ('') }}" +kubeinit_common_gitlab_runner_access_token: "{{ 
lookup('env','KUBEINIT_COMMON_GITLAB_RUNNER_ACCESS_TOKEN') | default ('') }}"
diff --git a/ci/ansible/inventory b/ci/ansible/inventory
new file mode 100644
index 000000000..afe6c26d4
--- /dev/null
+++ b/ci/ansible/inventory
@@ -0,0 +1,48 @@
+#
+# Common variables for the inventory
+#
+
+[all:vars]
+
+#
+# Internal variables
+#
+
+ansible_python_interpreter=/usr/bin/python3
+ansible_ssh_pipelining=True
+ansible_ssh_common_args='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=accept-new'
+
+#
+# Inventory variables
+#
+
+kubeinit_inventory_ci_gitlab_name=homelab
+kubeinit_inventory_ci_gitlab_url=http://192.168.222.222:7080
+
+kubeinit_inventory_remote_user=root
+kubeinit_inventory_ci_domain=kubeinit.local
+
+#
+# Hypervisor host definitions
+#
+
+[hypervisor_hosts]
+nyctea ansible_host=192.168.222.201
+tyto ansible_host=192.168.222.202
+strix ansible_host=192.168.222.203
+otus ansible_host=192.168.222.204
+
+#
+# CI host definition
+#
+
+# This inventory has one host identified as the ci host. By default that role falls to the
+# first hypervisor host, which is the same behavior as the first commented-out line below.
+# The second commented-out line would make the second hypervisor the ci host, and the final
+# commented-out line would set the ci host to a separate machine that is not used as a
+# hypervisor in this inventory.
+
+[ci_host]
+# kubeinit-ci target=nyctea
+# kubeinit-ci target=tyto
+# kubeinit-ci ansible_host=192.168.222.214
diff --git a/ci/ansible/playbook.yml b/ci/ansible/playbook.yml
new file mode 100644
index 000000000..bc2b62774
--- /dev/null
+++ b/ci/ansible/playbook.yml
@@ -0,0 +1,65 @@
+---
+# Copyright kubeinit contributors
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- name: Setup KubeInit CI environment
+  hosts: localhost
+  become: false
+  remote_user: root
+  gather_subset: "!all,network"
+  pre_tasks:
+    - name: Check if Ansible meets version requirements
+      tags: task_gather_facts
+      vars:
+        kubeinit_ansible_min_version: 2.9
+      ansible.builtin.assert:
+        that: "ansible_version.full is version_compare('{{ kubeinit_ansible_min_version }}', '>=')"
+        msg: >
+          "You must update Ansible to at least {{ kubeinit_ansible_min_version }} to use KubeInit."
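The assert above uses Ansible's `version_compare` test (also exposed under the shorter name `version`), which compares release strings component by component rather than lexically. A minimal sketch of that semantics, illustration only and not part of the patch, with the version literal chosen purely for demonstration:

- name: Show version_compare semantics (sketch)
  ansible.builtin.debug:
    msg: "{{ '2.10.0' is version_compare('2.9', '>=') }}"  # true numerically, even though '2.10.0' sorts before '2.9' as a plain string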
+ tasks: + - name: Gather facts about the deployment environment + tags: task_gather_facts + block: + - name: task-gather-facts + ansible.builtin.include_role: + name: "kubeinit_ci" + tasks_from: gather_ci_facts.yml + public: true + + - name: Prepare the environment + tags: task_prepare_environment + block: + - name: task-prepare-environment + ansible.builtin.include_role: + name: "kubeinit_ci" + tasks_from: prepare_environment.yml + public: true + + - name: Cleanup any remnants of previous CI deployments + tags: task_cleanup_deployment + block: + - name: task-cleanup-deployment + ansible.builtin.include_role: + name: "kubeinit_ci" + tasks_from: cleanup_deployment.yml + public: true + + - name: Deploy the CI + tags: task_deploy_ci + block: + - name: task-deploy-ci + ansible.builtin.include_role: + name: "kubeinit_ci" + public: true diff --git a/ci/ansible/roles/kubeinit_ci/tasks/cleanup_deployment.yml b/ci/ansible/roles/kubeinit_ci/tasks/cleanup_deployment.yml new file mode 100644 index 000000000..51124337c --- /dev/null +++ b/ci/ansible/roles/kubeinit_ci/tasks/cleanup_deployment.yml @@ -0,0 +1,206 @@ +--- +# Copyright kubeinit contributors +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- block: + - name: Prepare environment if needed + ansible.builtin.include_tasks: prepare_environment.yml + vars: + environment_prepared: "{{ kubeinit_facts_name is defined }}" + when: not environment_prepared + + - block: + - name: "Stop before 'task-cleanup-deployment' when requested" + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true + - name: End play + ansible.builtin.meta: end_play + when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-cleanup-deployment' + tags: omit_from_grapher + +# +# Cleanup all resources left over from previous CI deployment +# + +- name: Get list of existing remote system connection definitions + ansible.builtin.shell: | + set -eo pipefail + podman --remote system connection list | sed -e 1d -e 's/[* ].*//' + args: + executable: /bin/bash + register: _result_connections + changed_when: "_result_connections.rc == 0" + +# - name: Remove any existing remote system connection definition for ci host +# ansible.builtin.command: | +# podman --remote system connection remove {{ item }} +# loop: "{{ _result_connections.stdout_lines | list }}" +# register: _result +# changed_when: "_result.rc == 0" + +# - name: Reset local ssh keys +# ansible.builtin.known_hosts: +# name: "{{ item[1] }}" +# state: absent +# loop: "{{ kubeinit_facts_hostvars.node_aliases }}" + +# - name: Reset ssh keys in hypervisors +# ansible.builtin.known_hosts: +# name: "{{ node_alias }}" +# state: absent +# loop: "{{ groups['all_hosts'] | product(kubeinit_facts_hostvars.node_aliases | flatten | unique) }}" +# vars: +# kubeinit_deployment_node_name: "{{ item[0] }}" +# node_alias: "{{ item[1] }}" +# delegate_to: "{{ kubeinit_ci_host_name }}" + +- name: Find any CI pods from previous deployments + containers.podman.podman_pod_info: + 
register: _result_podinfo + delegate_to: "{{ kubeinit_ci_host_name }}" + when: hostvars[kubeinit_ci_host_name].podman_is_installed is defined and hostvars[kubeinit_ci_host_name].podman_is_installed + +- name: Set facts about those pods + ansible.builtin.set_fact: + orphaned_pod: "{{ pod }}" + loop: "{{ _result_podinfo.pods | default([]) }}" + loop_control: + loop_var: pod + when: pod.Name == kubeinit_ci_pod_name + +- name: Get container info from orphaned CI pod infra container + containers.podman.podman_container_info: + name: "{{ orphaned_pod.InfraContainerID }}" + register: _result_containerinfo + delegate_to: "{{ kubeinit_ci_host_name }}" + when: orphaned_pod is defined + +- name: Set facts about container netns + ansible.builtin.set_fact: + sandbox_key: "{{ _result_containerinfo.containers[0].NetworkSettings.SandboxKey | basename }}" + when: orphaned_pod is defined + +- name: Find any CI pod networks from previous deployments + containers.podman.podman_network_info: + register: _result_netinfo + delegate_to: "{{ kubeinit_ci_host_name }}" + when: hostvars[kubeinit_ci_host_name].podman_is_installed is defined and hostvars[kubeinit_ci_host_name].podman_is_installed + +- name: Set facts about those networks + ansible.builtin.set_fact: + orphaned_network: "{{ network }}" + loop: "{{ _result_netinfo.networks | default([]) }}" + loop_control: + loop_var: network + when: network.name == kubeinit_ci_bridge_name + +- name: Run gitlab-runner unregister in runner container + ansible.builtin.shell: | + set -eo pipefail + podman --remote exec kubeinit-merge-request-runner gitlab-runner unregister --all-runners || true + args: + executable: /bin/bash + register: _result_connections + changed_when: "_result_connections.rc == 0" + when: orphaned_pod is defined + +- name: Stop and disable user services + ansible.builtin.service: + name: "{{ service_name }}" + scope: user + state: stopped + enabled: false + register: _result_stop_service + failed_when: _result_stop_service is not defined + loop: ["kubeinit-merge-request-runner", "kubeinit-ara-output", "kubeinit-ara-api"] + loop_control: + loop_var: service_name + delegate_to: "{{ kubeinit_ci_host_name }}" + +- name: Remove previous CI podman pod + containers.podman.podman_pod: + name: "{{ kubeinit_ci_pod_name }}" + state: absent + delegate_to: "{{ kubeinit_ci_host_name }}" + when: hostvars[kubeinit_ci_host_name].podman_is_installed is defined and hostvars[kubeinit_ci_host_name].podman_is_installed + +- name: Remove any previous kubeinit CI podman network + containers.podman.podman_network: + name: "{{ kubeinit_ci_bridge_name }}" + state: absent + delegate_to: "{{ kubeinit_ci_host_name }}" + when: hostvars[kubeinit_ci_host_name].podman_is_installed is defined and hostvars[kubeinit_ci_host_name].podman_is_installed + +- name: Remove netns for CI pods + community.general.ip_netns: + name: "{{ sandbox_key }}" + state: absent + delegate_to: "{{ kubeinit_ci_host_name }}" + when: sandbox_key is defined + +- name: Find any podman volumes from previous deployments + containers.podman.podman_volume_info: + register: _result_volinfo + delegate_to: "{{ kubeinit_ci_host_name }}" + when: hostvars[kubeinit_ci_host_name].podman_is_installed is defined and hostvars[kubeinit_ci_host_name].podman_is_installed + +- name: Remove any previous kubeinit podman volumes + containers.podman.podman_volume: + name: "{{ volume_name }}" + state: absent + loop: ["kubeinit-ara-config", "kubeinit-ara-output", "kubeinit-merge-request-runner-config", "kubeinit-runner-builds"] + 
loop_control: + loop_var: volume_name + delegate_to: "{{ kubeinit_ci_host_name }}" + when: hostvars[kubeinit_ci_host_name].podman_is_installed is defined and hostvars[kubeinit_ci_host_name].podman_is_installed + +- name: Remove any previous kubeinit buildah containers + ansible.builtin.shell: | + set -eo pipefail + buildah rm --all || true + args: + executable: /bin/bash + register: _result + changed_when: "_result.rc == 0" + delegate_to: "{{ kubeinit_ci_host_name }}" + when: hostvars[kubeinit_ci_host_name].podman_is_installed is defined and hostvars[kubeinit_ci_host_name].podman_is_installed + +- name: Prune container images created for the CI + ansible.builtin.shell: | + set -eo pipefail + podman image prune --filter label=kubeinit-ci-host-name={{ kubeinit_ci_host_name }} --all --force || true + args: + executable: /bin/bash + register: _result + changed_when: "_result.rc == 0" + delegate_to: "{{ kubeinit_ci_host_name }}" + when: hostvars[kubeinit_ci_host_name].podman_is_installed is defined and hostvars[kubeinit_ci_host_name].podman_is_installed + +- block: + - name: Add task-cleanup-deployment to tasks_completed + ansible.builtin.add_host: + name: "{{ kubeinit_facts_name }}" + tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-cleanup-deployment']) }}" + + - name: Update kubeinit_facts_hostvars + ansible.builtin.set_fact: + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" + + - block: + - name: Stop after 'task-cleanup-deployment' when requested + ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true + - name: End play + ansible.builtin.meta: end_play + when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed + tags: omit_from_grapher diff --git a/ci/ansible/roles/kubeinit_ci/tasks/create_managed_service.yml b/ci/ansible/roles/kubeinit_ci/tasks/create_managed_service.yml new file mode 100644 index 000000000..f9260c7f2 --- /dev/null +++ b/ci/ansible/roles/kubeinit_ci/tasks/create_managed_service.yml @@ -0,0 +1,73 @@ +--- +# Copyright kubeinit contributors +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
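The create_managed_service.yml tasks that follow wrap an existing podman container in a user-scoped systemd unit. All of their inputs arrive through _param_-prefixed variables, so a caller invokes the file along these lines (a usage sketch mirroring the invocations later in this patch, with the kubeinit-ara-api container as the example):

- name: Create systemd service for podman container (usage sketch)
  ansible.builtin.include_role:
    name: "kubeinit_ci"
    tasks_from: create_managed_service.yml
    public: true
  vars:
    _param_service_user_dir: "{{ hostvars[kubeinit_ci_host_name].remote_home }}"
    _param_service_user: "{{ kubeinit_ci_host_user }}"
    _param_systemd_service_name: kubeinit-ara-api
    _param_podman_container_name: "{{ _result_container_info.container.Name }}"
    _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}"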
+
+
+- name: Ensure user-specific systemd instances are persistent
+  ansible.builtin.command: |
+    /usr/bin/loginctl enable-linger {{ _param_service_user }}
+  register: _result
+  changed_when: "_result.rc == 0"
+
+- name: Create systemd user directory
+  ansible.builtin.file:
+    path: "{{ _param_service_user_dir }}/.config/systemd/user"
+    state: directory
+    owner: "{{ _param_service_user }}"
+    group: "{{ _param_service_user }}"
+    mode: '0775'
+
+- name: Copy the podman systemd service file
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      Description=Podman {{ _param_systemd_service_name }}.service
+      [Service]
+      Restart=on-failure
+      ExecStart=/usr/bin/podman start {{ _param_podman_container_name }}
+      ExecStop=/usr/bin/podman stop -t 10 {{ _param_podman_container_name }}
+      SuccessExitStatus=143
+      KillMode=none
+      Type=forking
+      PIDFile={{ _param_podman_container_pidfile }}
+      [Install]
+      WantedBy=default.target
+    dest: "{{ _param_service_user_dir }}/.config/systemd/user/{{ _param_systemd_service_name }}.service"
+    owner: "{{ _param_service_user }}"
+    group: "{{ _param_service_user }}"
+    mode: '0644'
+
+- name: Reload systemd service
+  ansible.builtin.systemd:
+    daemon_reexec: yes
+    scope: user
+  environment:
+    DBUS_SESSION_BUS_ADDRESS: "{{ ansible_env.DBUS_SESSION_BUS_ADDRESS|default('unix:path=/run/user/' + ansible_effective_user_id|string + '/bus') }}"
+
+- name: Enable {{ _param_systemd_service_name }}.service
+  ansible.builtin.systemd:
+    name: "{{ _param_systemd_service_name }}"
+    enabled: yes
+    scope: user
+  environment:
+    DBUS_SESSION_BUS_ADDRESS: "{{ ansible_env.DBUS_SESSION_BUS_ADDRESS|default('unix:path=/run/user/' + ansible_effective_user_id|string + '/bus') }}"
+
+- name: Start {{ _param_systemd_service_name }}.service
+  ansible.builtin.systemd:
+    name: "{{ _param_systemd_service_name }}"
+    state: started
+    scope: user
+  environment:
+    DBUS_SESSION_BUS_ADDRESS: "{{ ansible_env.DBUS_SESSION_BUS_ADDRESS|default('unix:path=/run/user/' + ansible_effective_user_id|string + '/bus') }}"
diff --git a/ci/ansible/roles/kubeinit_ci/tasks/gather_ci_facts.yml b/ci/ansible/roles/kubeinit_ci/tasks/gather_ci_facts.yml
new file mode 100644
index 000000000..1786fbb13
--- /dev/null
+++ b/ci/ansible/roles/kubeinit_ci/tasks/gather_ci_facts.yml
@@ -0,0 +1,56 @@
+---
+# Copyright kubeinit contributors
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
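Once create_managed_service.yml has installed and started a unit, it can be checked out-of-band with systemd's user-session tooling. A hedged verification sketch (the service name kubeinit-ara-api is just an example taken from this patch):

- name: Verify a managed service is active (manual check sketch)
  ansible.builtin.command: systemctl --user is-active kubeinit-ara-api.service
  register: _result_unit_state
  changed_when: false
  failed_when: _result_unit_state.stdout != 'active'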
+
+- block:
+  - block:
+    - name: "Stop before 'task-gather-facts' when requested"
+      ansible.builtin.add_host: name='kubeinit-ci-facts' playbook_terminated=true
+    - name: End play
+      ansible.builtin.meta: end_play
+    when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-gather-facts'
+  tags: omit_from_grapher
+
+#
+# Gather kubeinit ci facts
+#
+- name: Add an explicit localhost entry to hostvars
+  ansible.builtin.add_host:
+    name: localhost
+    ansible_connection: local
+    ansible_python_interpreter: "{{ ansible_playbook_python }}"
+
+- name: Set hostname we use to store ci facts
+  ansible.builtin.set_fact:
+    kubeinit_facts_name: 'kubeinit-ci-facts'
+
+- name: Add remote_user fact
+  ansible.builtin.add_host:
+    name: "{{ kubeinit_facts_name }}"
+    remote_user: "{{ kubeinit_inventory_remote_user | default('root') }}"
+
+- block:
+  - name: Add task-gather-facts to tasks completed
+    ansible.builtin.add_host:
+      name: "{{ kubeinit_facts_name }}"
+      tasks_completed: "{{ ['task-gather-facts'] }}"
+
+  - block:
+    - name: Stop after 'task-gather-facts' when requested
+      ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true
+    - name: End play
+      ansible.builtin.meta: end_play
+    when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in hostvars[kubeinit_facts_name].tasks_completed
+  tags: omit_from_grapher
diff --git a/ci/ansible/roles/kubeinit_ci/tasks/gather_host_facts.yml b/ci/ansible/roles/kubeinit_ci/tasks/gather_host_facts.yml
new file mode 100644
index 000000000..d35b2e23e
--- /dev/null
+++ b/ci/ansible/roles/kubeinit_ci/tasks/gather_host_facts.yml
@@ -0,0 +1,144 @@
+---
+# Copyright kubeinit contributors
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
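gather_ci_facts.yml above leans on add_host to create an in-memory pseudo-host ('kubeinit-ci-facts') that is never connected to; it serves purely as a hostvars bucket that survives across plays, which set_fact alone cannot do. A minimal illustration of the same trick, with placeholder names:

- name: Store a fact on a pseudo-host (sketch)
  ansible.builtin.add_host:
    name: example-facts          # not a reachable machine, only a hostvars bucket
    example_fact: some-value

- name: Read it back from any later play
  ansible.builtin.debug:
    msg: "{{ hostvars['example-facts'].example_fact }}"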
+ +- name: Delegate to _param_gather_host + block: + + - name: Gather network facts + ansible.builtin.gather_facts: + gather_subset: "!all,network" + register: _result_facts + + - name: Set distro_family for CentOS + ansible.builtin.set_fact: + distro_family: "CentOS" + host_os: "centos" + when: _result_facts.ansible_facts.ansible_distribution == 'CentOS' + + - name: Set distro_family for RedHat + ansible.builtin.set_fact: + distro_family: "CentOS" + host_os: "redhat" + when: _result_facts.ansible_facts.ansible_distribution == 'RedHat' + + - name: Set distro_family for Fedora + ansible.builtin.set_fact: + distro_family: "Fedora" + host_os: "fedora" + when: _result_facts.ansible_facts.ansible_distribution == 'Fedora' + + - name: Set distro_family for Debian + ansible.builtin.set_fact: + distro_family: "Debian" + host_os: "debian" + when: _result_facts.ansible_facts.ansible_distribution == 'Debian' + + - name: Set distro_family for Ubuntu + ansible.builtin.set_fact: + distro_family: "Debian" + host_os: "ubuntu" + when: _result_facts.ansible_facts.ansible_distribution == 'Ubuntu' + + - name: Fails if OS is not supported + ansible.builtin.fail: + msg: "The host \"{{ hostvars[_param_gather_host].ansible_host }}\" needs to be CentOS/RHEL, Fedora, or Debian/Ubuntu" + when: not distro_family is defined + + - name: Gather the package facts + ansible.builtin.package_facts: + register: _result_packages + + - name: Set podman_installed + ansible.builtin.set_fact: + podman_installed: "{{ true if ('podman' in _result_packages.ansible_facts.packages) else false }}" + + - name: Gather the services facts + ansible.builtin.service_facts: + register: _result_services + + - name: Set firewalld_state to unknown + ansible.builtin.set_fact: + firewalld_state: 'unknown' + + - name: Set firewalld_state when firewalld is defined + ansible.builtin.set_fact: + firewalld_state: "{{ _result_services.ansible_facts.services['firewalld'].state }}" + when: _result_services.ansible_facts.services['firewalld'] is defined + + - name: Set firewalld_state when firewalld.service is defined + ansible.builtin.set_fact: + firewalld_state: "{{ _result_services.ansible_facts.services['firewalld.service'].state }}" + when: _result_services.ansible_facts.services['firewalld.service'] is defined + + - name: Set firewalld_active + ansible.builtin.set_fact: + firewalld_active: "{{ true if firewalld_state == 'running' else false }}" + + - name: Clear podman_state + ansible.builtin.set_fact: + podman_state: '' + + - name: Set podman_state when podman is defined + ansible.builtin.set_fact: + podman_state: "{{ _result_services.ansible_facts.services['podman'].state }}" + when: _result_services.ansible_facts.services['podman'] is defined + + - name: Set podman_state when podman.service is defined + ansible.builtin.set_fact: + podman_state: "{{ _result_services.ansible_facts.services['podman.service'].state }}" + when: _result_services.ansible_facts.services['podman.service'] is defined + + - name: Set podman_active + ansible.builtin.set_fact: + podman_active: "{{ true if podman_state == 'running' else false }}" + + - name: Set ssh_host_key_info + ansible.builtin.set_fact: + ssh_host_key_info: "{{ _result_facts.ansible_facts.ansible_ssh_host_key_ecdsa_public_keytype }} {{ _result_facts.ansible_facts.ansible_ssh_host_key_ecdsa_public }}" + when: > + _result_facts.ansible_facts.ansible_ssh_host_key_ecdsa_public_keytype is defined and + _result_facts.ansible_facts.ansible_ssh_host_key_ecdsa_public is defined + + - name: Add ansible facts to 
hostvars + ansible.builtin.add_host: + name: "{{ _param_gather_host }}" + ansible_default_ipv4_address: "{{ _result_facts.ansible_facts.ansible_default_ipv4.address | default(omit) }}" + ansible_hostname: "{{ _result_facts.ansible_facts.ansible_hostname }}" + ansible_distribution: "{{ _result_facts.ansible_facts.ansible_distribution }}" + ansible_distribution_major_version: "{{ _result_facts.ansible_facts.ansible_distribution_major_version }}" + distribution_family: "{{ distro_family }}" + ssh_host_key_ecdsa: "{{ ssh_host_key_info | default(omit) }}" + os: "{{ hostvars[_param_gather_host].os if (hostvars[_param_gather_host].os is defined) else host_os }}" + firewalld_is_active: "{{ firewalld_active }}" + podman_is_installed: "{{ podman_installed }}" + podman_is_active: "{{ podman_active }}" + remote_path: "{{ _result_facts.ansible_facts.ansible_env['PATH'] }}" + remote_home: "{{ _result_facts.ansible_facts.ansible_env['HOME'] }}" + ssh_connection_address: "{{ 'localhost' if (_param_gather_host == 'localhost') else _result_facts.ansible_facts.ansible_env['SSH_CONNECTION'].split(' ')[2] }}" + runtime_path: "{{ _result_facts.ansible_facts.ansible_env['XDG_RUNTIME_DIR'] | default('') | string }}" + + - name: Update kubeinit_facts_hostvars + ansible.builtin.set_fact: + kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}" + when: kubeinit_facts_hostvars is defined + + - name: Clear results + ansible.builtin.set_fact: + _result_facts: null + _result_packages: null + _result_services: null + + delegate_to: "{{ _param_gather_host }}" diff --git a/ci/ansible/roles/kubeinit_ci/tasks/main.yml b/ci/ansible/roles/kubeinit_ci/tasks/main.yml new file mode 100644 index 000000000..cd4288022 --- /dev/null +++ b/ci/ansible/roles/kubeinit_ci/tasks/main.yml @@ -0,0 +1,782 @@ +--- +# Copyright kubeinit contributors +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
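The ssh_connection_address fact set by gather_host_facts.yml above comes from splitting the SSH_CONNECTION environment variable, which sshd defines as "client-ip client-port server-ip server-port"; index 2 is therefore the address the controller used to reach the host. A worked illustration with made-up values:

- name: Show how ssh_connection_address is derived (sketch)
  ansible.builtin.set_fact:
    _example_address: "{{ '10.0.0.5 52422 192.168.222.201 22'.split(' ')[2] }}"  # -> 192.168.222.201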
+
+- block:
+  - name: Prepare environment if needed
+    ansible.builtin.include_tasks: prepare_environment.yml
+    vars:
+      environment_prepared: "{{ kubeinit_facts_name is defined }}"
+    when: not environment_prepared
+
+  - block:
+    - name: "Stop before 'task-deploy-ci' when requested"
+      ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true
+    - name: End play
+      ansible.builtin.meta: end_play
+    when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-deploy-ci'
+  tags: omit_from_grapher
+
+- name: Set ssh port to use
+  ansible.builtin.set_fact:
+    podman_remote_ssh_port: 22
+
+- name: Set the remote end of the tunnel
+  ansible.builtin.set_fact:
+    podman_remote_ssh_host: "{{ kubeinit_ci_host_address }}"
+
+- name: Delegate to localhost
+  block:
+
+  - name: Stat /etc/localtime
+    ansible.builtin.stat:
+      path: "/etc/localtime"
+    register: _result_localtime
+    changed_when: "_result_localtime is defined"
+
+  #
+  # Currently there are two ways of configuring
+  # the docker.io credentials using the variable
+  # kubeinit_common_docker_password
+  #
+  # If the kubeinit_common_docker_password variable
+  # holds the path of a file containing the password,
+  # we read the password from that file; if no such
+  # path exists, we assume the variable contains the
+  # password itself.
+  #
+  - name: Check if kubeinit_common_docker_password path exists
+    ansible.builtin.stat:
+      path: "{{ kubeinit_common_docker_password }}"
+    register: _result_passwordfile
+    no_log: true
+    when: |
+      kubeinit_common_docker_username is defined and
+      kubeinit_common_docker_password is defined and
+      kubeinit_common_docker_username and
+      kubeinit_common_docker_password
+
+  - name: Read docker password from file when the variable has the path
+    ansible.builtin.slurp:
+      src: "{{ kubeinit_common_docker_password }}"
+    register: _result_passsword
+    no_log: true
+    when: |
+      kubeinit_common_docker_username is defined and
+      kubeinit_common_docker_password is defined and
+      kubeinit_common_docker_username and
+      kubeinit_common_docker_password and
+      _result_passwordfile.stat.exists
+
+  - name: Read runner registration_token
+    ansible.builtin.slurp:
+      src: "{{ kubeinit_common_gitlab_runner_registration_token }}"
+    register: _result_registration_token
+    no_log: true
+    when: |
+      kubeinit_common_gitlab_runner_registration_token is defined and kubeinit_common_gitlab_runner_registration_token
+
+  - name: Read runner access token
+    ansible.builtin.slurp:
+      src: "{{ kubeinit_common_gitlab_runner_access_token }}"
+    register: _result_access_token
+    no_log: true
+    when: |
+      kubeinit_common_gitlab_runner_access_token is defined and kubeinit_common_gitlab_runner_access_token
+
+  - name: Generate a local OpenSSH keypair for remote podman access to the ci host
+    community.crypto.openssh_keypair:
+      path: "{{ kubeinit_ci_keypair_path }}"
+      type: "{{ kubeinit_common_ssh_keytype }}"
+      comment: "{{ kubeinit_ci_host_fqdn }}"
+      regenerate: 'never'
+    register: _result_ci_keypair
+
+  delegate_to: localhost
+
+- name: Delegate to kubeinit_ci_host_name
+  block:
+
+  - name: Add public key to ci host
+    ansible.posix.authorized_key:
+      user: "{{ kubeinit_facts_hostvars.remote_user }}"
+      key: "{{ _result_ci_keypair.public_key }}"
+      comment: "{{ _result_ci_keypair.comment }}"
+      state: present
+
+  - name: Install common requirements
+    ansible.builtin.package:
+      name: ["podman", "buildah"]
+      state: present
+    become: true
+    become_user: root
+
+  - name: Podman login to docker.io
+
containers.podman.podman_login:
+      username: "{{ kubeinit_common_docker_username }}"
+      password: "{{ (_result_passsword.content | b64decode | trim) if (_result_passwordfile.stat.exists) else (kubeinit_common_docker_password) }}"
+      registry: "docker.io"
+    no_log: true
+    when: |
+      kubeinit_common_docker_username is defined and
+      kubeinit_common_docker_password is defined and
+      kubeinit_common_docker_username and
+      kubeinit_common_docker_password
+
+  - name: Clear any reference to docker password
+    ansible.builtin.set_fact:
+      _result_passsword: null
+    no_log: true
+    when: |
+      kubeinit_common_docker_username is defined and
+      kubeinit_common_docker_password is defined and
+      kubeinit_common_docker_username and
+      kubeinit_common_docker_password
+
+  - name: Ensure user-specific systemd instances are persistent
+    ansible.builtin.command: loginctl enable-linger {{ kubeinit_ci_host_user }}
+    register: _result
+    changed_when: "_result.rc == 0"
+
+  - name: Retrieve remote user runtime path
+    ansible.builtin.command: loginctl show-user {{ kubeinit_ci_host_user }} -p RuntimePath --value
+    register: _result_systemd_runtime_path
+    changed_when: "_result_systemd_runtime_path.rc == 0"
+
+  - name: Enable and start podman.socket
+    ansible.builtin.systemd:
+      name: podman.socket
+      enabled: yes
+      state: started
+      scope: user
+
+  - name: Start podman.service
+    ansible.builtin.systemd:
+      name: podman.service
+      state: started
+      scope: user
+
+  - name: Add remote system connection definition for ci host
+    ansible.builtin.command: |
+      podman --remote system connection add "{{ kubeinit_ci_host_name }}" --identity "{{ kubeinit_ci_keypair_path }}" "ssh://{{ kubeinit_ci_host_user }}@{{ podman_remote_ssh_host }}:{{ podman_remote_ssh_port }}{{ _result_systemd_runtime_path.stdout }}/podman/podman.sock"
+    register: _result
+    changed_when: "_result.rc == 0"
+    delegate_to: localhost
+
+  - name: Create kubeinit-ara-config volume
+    containers.podman.podman_volume:
+      name: kubeinit-ara-config
+      state: present
+
+  - name: Create kubeinit-ara-output volume
+    containers.podman.podman_volume:
+      name: kubeinit-ara-output
+      state: present
+
+  - name: Create kubeinit-runner-builds volume
+    containers.podman.podman_volume:
+      name: kubeinit-runner-builds
+      state: present
+
+  - name: Create ~/gitlab-runner-home folders
+    ansible.builtin.file:
+      path: "{{ item }}"
+      state: directory
+      mode: '0700'
+    loop:
+      - "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home"
+      - "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.gitlab-runner"
+      - "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.secrets"
+      - "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.ssh"
+      - "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/scripts"
+
+  - name: Create /home/gitlab-runner/.python-gitlab.cfg from template
+    ansible.builtin.template:
+      src: python-gitlab.cfg.j2
+      dest: "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.python-gitlab.cfg"
+      mode: '0644'
+
+  - name: Create /home/gitlab-runner/.gitlab-runner/config.toml from template
+    ansible.builtin.template:
+      src: config.toml.j2
+      dest: "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.gitlab-runner/config.toml"
+      mode: '0644'
+
+  - name: Create /home/gitlab-runner/.gitlab-runner/runner-host
+    ansible.builtin.copy:
+      content: |
+        {{ kubeinit_ci_host_address }}
+      dest: "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.gitlab-runner/runner-host"
+      mode:
'0644' + + - name: Create /home/gitlab-runner/.gitlab-runner/runner-user + ansible.builtin.copy: + content: | + {{ kubeinit_ci_host_user }} + dest: "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.gitlab-runner/runner-user" + mode: '0644' + + - name: Create /home/gitlab-runner/.secrets/gitlab-private-token + ansible.builtin.copy: + content: | + {{ _result_access_token.content | b64decode | trim }} + dest: "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.secrets/gitlab-private-token" + mode: '0600' + + - name: Create /home/gitlab-runner/.ssh/config from template + ansible.builtin.template: + src: ssh-config.j2 + dest: "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.ssh/config" + mode: '0644' + + - name: Generate an OpenSSH keypair for gitlab-runner + community.crypto.openssh_keypair: + path: "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/.ssh/id_{{ kubeinit_common_ssh_keytype }}" + type: "{{ kubeinit_common_ssh_keytype }}" + comment: "{{ kubeinit_ci_host_fqdn }} kubeinit-runner" + regenerate: 'never' + register: _result_ci_keypair + + - name: Add gitlab-runner public key to ci host + ansible.posix.authorized_key: + user: "{{ kubeinit_ci_host_user }}" + key: "{{ _result_ci_keypair.public_key }}" + comment: "{{ _result_ci_keypair.comment }}" + state: present + + - name: Create /home/gitlab-runner/scripts/echo-private-token.sh from template + ansible.builtin.template: + src: echo-private-token.sh.j2 + dest: "{{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home/scripts/echo-private-token.sh" + mode: '0755' + + - name: Create gitlab-runner-home.tgz archive + ansible.builtin.shell: | + set -eo pipefail + (cd {{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home; tar --numeric-owner --owner 999 --group 999 -czvf {{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home.tgz .) 
+ rm -rf {{ hostvars[kubeinit_ci_host_name].remote_home }}/gitlab-runner-home + args: + executable: /bin/bash + register: _result + changed_when: "_result.rc == 0" + + - name: Remove old image + containers.podman.podman_image: + name: localhost/kubeinit/kubeinit-ara-api + state: absent + + - name: Remove any old buildah container + ansible.builtin.shell: | + set -eo pipefail + if [ "$(buildah ls --filter 'name=kubeinit-ara' --format {% raw %}'{{ .ContainerName }}'{% endraw %})" != "" ] + then + buildah rm kubeinit-ara + fi + args: + executable: /bin/bash + register: _result + changed_when: "_result.rc == 0" + + - name: Create a new working container image + ansible.builtin.command: buildah from --name kubeinit-ara docker.io/recordsansible/ara-api:latest + register: _result + changed_when: "_result.rc == 0" + + - name: Update existing packages in the image + ansible.builtin.command: buildah run kubeinit-ara -- dnf update -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Install commands and services we will need + ansible.builtin.command: buildah run kubeinit-ara -- dnf install -q -y procps findutils + register: _result + changed_when: "_result.rc == 0" + + - name: Link /etc/localtime to our local timezone + ansible.builtin.command: buildah run kubeinit-ara -- ln -sf {{ _result_localtime.stat.lnk_source }} /etc/localtime + register: _result + changed_when: "_result.rc == 0" + + - name: Set kubeinit-ci-host-name label + ansible.builtin.command: buildah config --label kubeinit-ci-host-name={{ kubeinit_ci_host_name }} kubeinit-ara + register: _result + changed_when: "_result.rc == 0" + + - name: Commit the image + ansible.builtin.command: buildah commit kubeinit-ara kubeinit/kubeinit-ara-api:latest + register: _result + changed_when: "_result.rc == 0" + + - name: Remove the buildah container + ansible.builtin.command: buildah rm kubeinit-ara + register: _result + changed_when: "_result.rc == 0" + + - name: Remove old image + containers.podman.podman_image: + name: localhost/kubeinit/kubeinit-ara-output + state: absent + + - name: Remove any old buildah container + ansible.builtin.shell: | + set -eo pipefail + if [ "$(buildah ls --filter 'name=kubeinit-output-data' --format {% raw %}'{{ .ContainerName }}'{% endraw %})" != "" ] + then + buildah rm kubeinit-output-data + fi + args: + executable: /bin/bash + register: _result + changed_when: "_result.rc == 0" + + - name: Create a new working container image + ansible.builtin.command: buildah from --name kubeinit-output-data docker.io/httpd:2.4 + register: _result + changed_when: "_result.rc == 0" + + - name: Update existing packages in the image + ansible.builtin.command: buildah run kubeinit-output-data -- apt-get update -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Install commands and services we will need + ansible.builtin.command: buildah run kubeinit-output-data -- apt-get install -q -y procps + register: _result + changed_when: "_result.rc == 0" + + - name: Auto-remove packages + ansible.builtin.command: buildah run kubeinit-output-data -- apt-get autoremove -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Auto-clean packages + ansible.builtin.command: buildah run kubeinit-output-data -- apt-get autoclean -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Clean packages + ansible.builtin.command: buildah run kubeinit-output-data -- apt-get clean -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Link /etc/localtime to our local 
timezone + ansible.builtin.command: buildah run kubeinit-output-data -- ln -sf {{ _result_localtime.stat.lnk_source }} /etc/localtime + register: _result + changed_when: "_result.rc == 0" + + - name: Link /usr/local/apache2/htdocs/ara-output-data to /opt/output_data/ + ansible.builtin.command: buildah run kubeinit-output-data -- ln -s /opt/output_data/ /usr/local/apache2/htdocs/ara-output-data + register: _result + changed_when: "_result.rc == 0" + + - name: Set httpd ServerName + ansible.builtin.command: buildah run kubeinit-output-data -- sed -i -e 's/^#ServerName .*/ServerName kubeinit-ci-pod/' /usr/local/apache2/conf/httpd.conf + register: _result + changed_when: "_result.rc == 0" + + - name: Set kubeinit-ci-host-name label + ansible.builtin.command: buildah config --label kubeinit-ci-host-name={{ kubeinit_ci_host_name }} kubeinit-output-data + register: _result + changed_when: "_result.rc == 0" + + - name: Commit the image + ansible.builtin.command: buildah commit kubeinit-output-data kubeinit/kubeinit-ara-output:latest + register: _result + changed_when: "_result.rc == 0" + + - name: Remove the buildah container + ansible.builtin.command: buildah rm kubeinit-output-data + register: _result + changed_when: "_result.rc == 0" + + - name: Remove old image + containers.podman.podman_image: + name: localhost/kubeinit/kubeinit-merge-request-runner + state: absent + + - name: Remove any old buildah container + ansible.builtin.shell: | + set -eo pipefail + if [ "$(buildah ls --filter 'name=kubeinit-runner' --format {% raw %}'{{ .ContainerName }}'{% endraw %})" != "" ] + then + buildah rm kubeinit-runner + fi + args: + executable: /bin/bash + register: _result + changed_when: "_result.rc == 0" + + - name: Create a new working container image + ansible.builtin.command: buildah from --name kubeinit-runner docker.io/gitlab/gitlab-runner:latest + register: _result + changed_when: "_result.rc == 0" + + - name: Update existing packages in the image + ansible.builtin.command: buildah run kubeinit-runner -- apt-get update -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Install gnupg for adding kubic repo + ansible.builtin.command: buildah run kubeinit-runner -- apt-get install -q -y gnupg + register: _result + changed_when: "_result.rc == 0" + + - name: Add kubic repo + ansible.builtin.command: buildah run kubeinit-runner -- bash -c '. /etc/os-release; echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list' + register: _result + changed_when: "_result.rc == 0" + + - name: Add kubic repo Release.key + ansible.builtin.command: buildah run kubeinit-runner -- bash -c '. 
/etc/os-release; curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key" | apt-key add -' + register: _result + changed_when: "_result.rc == 0" + + - name: Update packages in the image + ansible.builtin.command: buildah run kubeinit-runner -- apt-get update -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Install commands and services we will need + ansible.builtin.command: buildah run kubeinit-runner -- apt-get install -q -y python3 python3-pip python3-apt jq vim net-tools + register: _result + changed_when: "_result.rc == 0" + + - name: Auto-remove packages + ansible.builtin.command: buildah run kubeinit-runner -- apt-get autoremove -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Auto-clean packages + ansible.builtin.command: buildah run kubeinit-runner -- apt-get autoclean -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Clean packages + ansible.builtin.command: buildah run kubeinit-runner -- apt-get clean -q -y + register: _result + changed_when: "_result.rc == 0" + + - name: Link /etc/localtime to our local timezone + ansible.builtin.command: buildah run kubeinit-runner -- ln -sf {{ _result_localtime.stat.lnk_source }} /etc/localtime + register: _result + changed_when: "_result.rc == 0" + + - name: Install PyYAML + ansible.builtin.command: buildah run kubeinit-runner -- python3 -m pip install --ignore-installed PyYAML + register: _result + changed_when: "_result.rc == 0" + + - name: Install and upgrade pip + ansible.builtin.command: buildah run kubeinit-runner -- python3 -m pip install --upgrade pip + register: _result + changed_when: "_result.rc == 0" + + - name: Install and upgrade cryptography + ansible.builtin.command: buildah run kubeinit-runner -- python3 -m pip install --upgrade cryptography + register: _result + changed_when: "_result.rc == 0" + + - name: Install other python pip modules + ansible.builtin.command: buildah run kubeinit-runner -- python3 -m pip install --upgrade wheel shyaml ansible netaddr ara requests PyGithub python-gitlab 'pyparsing<3' pybadges 'jinja2<3' urllib3 google-cloud-storage + register: _result + changed_when: "_result.rc == 0" + + - name: Install podman, buildah and skopeo + ansible.builtin.command: buildah run kubeinit-runner -- apt-get install -yq podman buildah skopeo + register: _result + changed_when: "_result.rc == 0" + + - name: Copy contents of generated /home/gitlab-runner archive + ansible.builtin.command: buildah add --quiet --chown 999:999 kubeinit-runner gitlab-runner-home.tgz /home/gitlab-runner + register: _result + changed_when: "_result.rc == 0" + + - name: Set kubeinit-ci-host-name label + ansible.builtin.command: buildah config --label kubeinit-ci-host-name={{ kubeinit_ci_host_name }} kubeinit-runner + register: _result + changed_when: "_result.rc == 0" + + - name: Commit the image + ansible.builtin.command: buildah commit kubeinit-runner kubeinit/kubeinit-merge-request-runner:latest + register: _result + changed_when: "_result.rc == 0" + + - name: Remove the buildah container + ansible.builtin.command: buildah rm kubeinit-runner + register: _result + changed_when: "_result.rc == 0" + + - name: Remove the gitlab-runner-home archive + ansible.builtin.file: + path: gitlab-runner-home.tgz + state: absent + + - name: Create a podman network for the ci containers + containers.podman.podman_network: + name: "{{ kubeinit_ci_bridge_name }}" + disable_dns: true + state: present + + - name: Create a 
podman pod for the ci containers + containers.podman.podman_pod: + name: "{{ kubeinit_ci_pod_name }}" + network: "{{ kubeinit_ci_bridge_name }}" + hostname: "{{ kubeinit_ci_host_fqdn }}" + dns: "{{ kubeinit_common_dns_public }}" + publish: + - 8080:80/tcp + state: started + register: _result_pod_info + + - name: Gather current firewall rules + ansible.posix.firewalld_info: + active_zones: true + register: _result_firewalld_info + become: true + become_user: root + when: hostvars[kubeinit_ci_host_name].firewalld_is_active + + - name: Check firewalld ports for existing entries + ansible.builtin.add_host: + name: "{{ kubeinit_ci_host_name }}" + add_publish_port: "{{ true if (['8080', 'tcp'] not in default_zone_info.ports) else false }}" + reload_firewalld: "{{ true if (['8080', 'tcp'] not in default_zone_info.ports) else false }}" + become: true + become_user: root + vars: + default_zone_info: "{{ _result_firewalld_info.firewalld_info.zones[_result_firewalld_info.firewalld_info.default_zone] }}" + + - name: Open firewall port 8080 on bastion + ansible.posix.firewalld: + port: 8080/tcp + state: enabled + permanent: true + become: true + become_user: root + when: hostvars[kubeinit_ci_host_name].add_publish_port | default(false) + + - name: Reload firewalld service + ansible.builtin.command: | + firewall-cmd --reload + register: _result + changed_when: "_result.rc == 0" + become: true + become_user: root + when: hostvars[kubeinit_ci_host_name].reload_firewalld | default(false) + + - name: Reload podman networks + ansible.builtin.command: | + podman network reload --all + register: _result + changed_when: "_result.rc == 0" + when: hostvars[kubeinit_ci_host_name].reload_firewalld | default(false) + + - name: Remove any previous kubeinit-ara-api container + containers.podman.podman_container: + name: kubeinit-ara-api + state: absent + + - name: Create kubeinit-ara-api container + containers.podman.podman_container: + name: kubeinit-ara-api + image: kubeinit/kubeinit-ara-api:latest + state: stopped + pod: "{{ kubeinit_ci_pod_name }}" + init: true + cap_add: + - "AUDIT_WRITE" + volumes: + - "kubeinit-ara-config:/opt/ara:z" + - "kubeinit-ara-output:/opt/output_data:z" + - "kubeinit-runner-builds:/home/gitlab-runner/builds:z" + register: _result_container_info + + - name: Create systemd service for podman container + ansible.builtin.include_role: + name: "kubeinit_ci" + tasks_from: create_managed_service.yml + public: true + vars: + _param_service_user_dir: "{{ hostvars[kubeinit_ci_host_name].remote_home }}" + _param_service_user: "{{ kubeinit_ci_host_user }}" + _param_systemd_service_name: kubeinit-ara-api + _param_podman_container_name: "{{ _result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + + - name: Clear temp facts + ansible.builtin.set_fact: + _result_container_info: null + + - name: Set ownership of gitlab-runner builds folder + ansible.builtin.command: podman exec kubeinit-ara-api chown 999:999 /home/gitlab-runner/builds + register: _result + changed_when: "_result.rc == 0" + + - name: Remove any previous kubeinit-ara-output container + containers.podman.podman_container: + name: kubeinit-ara-output + state: absent + + - name: Create kubeinit-ara-output container + containers.podman.podman_container: + name: kubeinit-ara-output + image: kubeinit/kubeinit-ara-output:latest + state: stopped + pod: "{{ kubeinit_ci_pod_name }}" + init: true + cap_add: + - "AUDIT_WRITE" + volumes: + - 
"kubeinit-ara-output:/opt/output_data:z" + register: _result_container_info + + - name: Create systemd service for podman container + ansible.builtin.include_role: + name: "kubeinit_ci" + tasks_from: create_managed_service.yml + public: true + vars: + _param_service_user_dir: "{{ hostvars[kubeinit_ci_host_name].remote_home }}" + _param_service_user: "{{ kubeinit_ci_host_user }}" + _param_systemd_service_name: kubeinit-ara-output + _param_podman_container_name: "{{ _result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + + - name: Clear temp facts + ansible.builtin.set_fact: + _result_container_info: null + + - name: Remove any previous kubeinit-merge-request-runner container + containers.podman.podman_container: + name: kubeinit-merge-request-runner + state: absent + + - name: Create kubeinit-merge-request-runner container + containers.podman.podman_container: + name: kubeinit-merge-request-runner + image: kubeinit/kubeinit-merge-request-runner:latest + state: stopped + pod: "{{ kubeinit_ci_pod_name }}" + init: true + cap_add: + - "AUDIT_WRITE" + env: + DEBUG: true + LOG_LEVEL: debug + volumes: + - "{{ _result_systemd_runtime_path.stdout }}/podman/podman.sock:/var/run/docker.sock" + - "kubeinit-ara-output:/opt/output_data:z" + - "kubeinit-runner-builds:/home/gitlab-runner/builds:z" + command: ['run', '--user', 'gitlab-runner', '--working-directory', '/home/gitlab-runner'] + register: _result_container_info + + - name: Create systemd service for podman container + ansible.builtin.include_role: + name: "kubeinit_ci" + tasks_from: create_managed_service.yml + public: true + vars: + _param_service_user_dir: "{{ hostvars[kubeinit_ci_host_name].remote_home }}" + _param_service_user: "{{ kubeinit_ci_host_user }}" + _param_systemd_service_name: kubeinit-merge-request-runner + _param_podman_container_name: "{{ _result_container_info.container.Name }}" + _param_podman_container_pidfile: "{{ _result_container_info.container.ConmonPidFile }}" + + - name: Clear temp facts + ansible.builtin.set_fact: + _result_container_info: null + + delegate_to: "{{ kubeinit_ci_host_name }}" + +- name: Add gitlab-runner public key to hypervisor hosts + ansible.posix.authorized_key: + user: root + key: "{{ _result_ci_keypair.public_key }}" + comment: "{{ _result_ci_keypair.comment }}" + state: present + loop: "{{ groups['hypervisor_hosts'] }}" + loop_control: + loop_var: hypervisor_host + delegate_to: "{{ hypervisor_host }}" + +- name: Add remote container to hosts + ansible.builtin.add_host: + hostname: kubeinit-ara-api + ansible_connection: containers.podman.podman + ansible_python_interpreter: /usr/bin/python3 + ansible_podman_extra_args: --remote --connection "{{ kubeinit_ci_host_name }}" + +- name: Disable pipelining while using podman connector + block: + + - name: Wait for connection to kubeinit-ara-api container + ansible.builtin.wait_for_connection: + connect_timeout: 20 + sleep: 5 + delay: 5 + timeout: 300 + + - name: Run ara-manage migrate + ansible.builtin.command: ara-manage migrate + register: _result + changed_when: "_result.rc == 0" + + vars: + ansible_ssh_pipelining: False + delegate_to: kubeinit-ara-api + +- name: Add remote container to hosts + ansible.builtin.add_host: + hostname: kubeinit-merge-request-runner + ansible_connection: containers.podman.podman + ansible_python_interpreter: /usr/bin/python3 + ansible_podman_extra_args: --remote --connection "{{ kubeinit_ci_host_name }}" + +- name: Disable pipelining while using 
+- name: Disable pipelining while using podman connector
+  block:
+
+  - name: Wait for connection to kubeinit-merge-request-runner container
+    ansible.builtin.wait_for_connection:
+      connect_timeout: 20
+      sleep: 5
+      delay: 5
+      timeout: 300
+
+  - name: Register runner
+    ansible.builtin.command: gitlab-runner register --non-interactive --url {{ kubeinit_inventory_ci_gitlab_url }}/ --clone-url {{ kubeinit_inventory_ci_gitlab_url }} --registration-token {{ _result_registration_token.content | b64decode | trim }} --name kubeinit-merge-request-runner --executor shell --tag-list kubeinit-merge-request --output-limit 50000
+    register: _result
+    changed_when: "_result.rc == 0"
+
+  vars:
+    ansible_ssh_pipelining: False
+  delegate_to: kubeinit-merge-request-runner
+
+- block:
+  - name: Add task-deploy-ci to tasks_completed
+    ansible.builtin.add_host:
+      name: "{{ kubeinit_facts_name }}"
+      tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-deploy-ci']) }}"
+
+  - name: Update kubeinit_facts_hostvars
+    ansible.builtin.set_fact:
+      kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}"
+
+  - block:
+    - name: Stop after 'task-deploy-ci' when requested
+      ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true
+    - name: End play
+      ansible.builtin.meta: end_play
+    when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed
+  tags: omit_from_grapher
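The tasks_completed bookkeeping above is what the kubeinit_stop_after_task and
kubeinit_stop_before_task extra vars are checked against. As a sketch (the
inventory and playbook paths are assumptions, not taken from this patch), a CI
run can be split into stages from the command line:

    # Stop once the CI pod and runners have been deployed:
    ansible-playbook -v --user root -i ./ci/ansible/inventory \
        -e kubeinit_stop_after_task=task-deploy-ci \
        ./ci/ansible/playbook.yml

    # Or end the play before the environment is prepared at all:
    ansible-playbook -v --user root -i ./ci/ansible/inventory \
        -e kubeinit_stop_before_task=task-prepare-environment \
        ./ci/ansible/playbook.yml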
diff --git a/ci/ansible/roles/kubeinit_ci/tasks/prepare_environment.yml b/ci/ansible/roles/kubeinit_ci/tasks/prepare_environment.yml
new file mode 100644
index 000000000..629939da1
--- /dev/null
+++ b/ci/ansible/roles/kubeinit_ci/tasks/prepare_environment.yml
@@ -0,0 +1,133 @@
+---
+# Copyright kubeinit contributors
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+- block:
+  - name: Gather kubeinit ci facts if needed
+    ansible.builtin.include_tasks: gather_ci_facts.yml
+    vars:
+      facts_prepared: "{{ kubeinit_facts_name is defined }}"
+    when: not facts_prepared
+
+  - block:
+    - name: "Stop before 'task-prepare-environment' when requested"
+      ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true
+    - name: End play
+      ansible.builtin.meta: end_play
+    when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-prepare-environment'
+  tags: omit_from_grapher
+
+- name: Define kubeinit_facts_hostvars
+  ansible.builtin.set_fact:
+    kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}"
+
+- name: Define ci fact names needed to prepare groups
+  ansible.builtin.set_fact:
+    kubeinit_ci_host_name:
+      "{{ 'kubeinit-ci' if (('ci_host' not in groups) or ((groups['ci_host'] | list | length) == 0)) else groups['ci_host'][0] }}"
+
+- name: Add a default entry for the first hypervisor if there are no ci_host members
+  ansible.builtin.add_host:
+    name: "{{ kubeinit_ci_host_name }}"
+    group: ci_host
+    target: "{{ groups['hypervisor_hosts'][0] }}"
+  when: "('ci_host' not in groups) or ((groups['ci_host'] | list | length) == 0)"
+
+- name: Add remote_user for ci_host
+  ansible.builtin.add_host:
+    name: "{{ kubeinit_ci_host_name }}"
+    group: ci_host
+    remote_user: "{{ kubeinit_facts_hostvars.remote_user }}"
+    ansible_ssh_user: "{{ kubeinit_facts_hostvars.remote_user }}"
+    ansible_ssh_extra_args: "-i ~/.ssh/id_{{ kubeinit_common_ssh_keytype }}"
+
+- name: Add ansible_host for ci_host if not defined
+  ansible.builtin.add_host:
+    name: "{{ kubeinit_ci_host_name }}"
+    group: ci_host
+    ansible_host: "{{ hostvars[hostvars[kubeinit_ci_host_name].target].ansible_host }}"
+  when: "hostvars[kubeinit_ci_host_name].ansible_host is not defined"
+
+- name: Add target for ci_host if not defined
+  ansible.builtin.add_host:
+    name: "{{ kubeinit_ci_host_name }}"
+    group: ci_host
+    target: "{{ kubeinit_ci_host_name }}"
+  when: "hostvars[kubeinit_ci_host_name].target is not defined"
+
+- name: Check to see if we have access to ci_host
+  ansible.builtin.ping:
+  vars:
+    ansible_ssh_user: "{{ hostvars[groups['ci_host'][0]].remote_user }}"
+    ansible_ssh_extra_args: "-i ~/.ssh/id_{{ kubeinit_common_ssh_keytype }}"
+  delegate_to: "{{ kubeinit_ci_host_name }}"
+
+- name: Gather facts from ci_host
+  ansible.builtin.include_tasks: gather_host_facts.yml
+  vars:
+    ansible_ssh_user: "{{ hostvars[groups['ci_host'][0]].remote_user }}"
+    ansible_ssh_extra_args: "-i ~/.ssh/id_{{ kubeinit_common_ssh_keytype }}"
+    _param_gather_host: "{{ kubeinit_ci_host_name }}"
+
+- name: Define additional host facts
+  ansible.builtin.set_fact:
+    kubeinit_ci_host_fqdn: "{{ kubeinit_ci_host_name }}.{{ kubeinit_inventory_ci_domain }}"
+    kubeinit_ci_host_address: "{{ hostvars[kubeinit_ci_host_name].ssh_connection_address }}"
+    kubeinit_ci_host_user: "{{ kubeinit_facts_hostvars.remote_user }}"
+    kubeinit_ci_keypair_path: "~/.ssh/kubeinit_ci_id_{{ kubeinit_common_ssh_keytype }}"
+    kubeinit_ci_bridge_name: kubeinit-ci-bridge
+    kubeinit_ci_pod_name: kubeinit-ci-pod
+
+- name: Confirm presence of podman package
+  ansible.builtin.package_facts:
+  failed_when: "'podman' not in ansible_facts.packages"
+  delegate_to: "{{ kubeinit_ci_host_name }}"
+  when: ansible_check_mode
+
+- name: Install podman package
+  ansible.builtin.package:
+    name:
+      - podman
+    state: present
+  become: true
+  become_user: root
+  delegate_to: "{{ kubeinit_ci_host_name }}"
+  when: not ansible_check_mode
+
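+# Only one of the two podman tasks above runs per invocation: a dry run such
+# as `ansible-playbook --check ...` (invocation sketch, not a fixed CLI)
+# exercises the package_facts branch and fails fast if podman is missing,
+# while a normal pass installs it. This keeps check mode side-effect free
+# without skipping the verification entirely.
+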
+- name: Gather hypervisor host facts
+  ansible.builtin.include_tasks: gather_host_facts.yml
+  loop: "{{ groups['hypervisor_hosts'] }}"
+  loop_control:
+    loop_var: _param_gather_host
+  vars:
+    ansible_ssh_extra_args: "-i ~/.ssh/id_{{ kubeinit_common_ssh_keytype }}"
+
+- block:
+  - name: Add task-prepare-environment to tasks_completed
+    ansible.builtin.add_host:
+      name: "{{ kubeinit_facts_name }}"
+      tasks_completed: "{{ kubeinit_facts_hostvars.tasks_completed | union(['task-prepare-environment']) }}"
+
+  - name: Update kubeinit_facts_hostvars
+    ansible.builtin.set_fact:
+      kubeinit_facts_hostvars: "{{ hostvars[kubeinit_facts_name] }}"
+
+  - block:
+    - name: Stop after 'task-prepare-environment' when requested
+      ansible.builtin.add_host: name="{{ kubeinit_facts_name }}" playbook_terminated=true
+    - name: End play
+      ansible.builtin.meta: end_play
+    when: kubeinit_stop_after_task is defined and kubeinit_stop_after_task in kubeinit_facts_hostvars.tasks_completed
+  tags: omit_from_grapher
diff --git a/ci/ansible/roles/kubeinit_ci/templates/config.toml.j2 b/ci/ansible/roles/kubeinit_ci/templates/config.toml.j2
new file mode 100644
index 000000000..b6697cd3c
--- /dev/null
+++ b/ci/ansible/roles/kubeinit_ci/templates/config.toml.j2
@@ -0,0 +1,5 @@
+concurrent = 1
+check_interval = 0
+
+[session_server]
+  session_timeout = 1800
diff --git a/ci/ansible/roles/kubeinit_ci/templates/echo-private-token.sh.j2 b/ci/ansible/roles/kubeinit_ci/templates/echo-private-token.sh.j2
new file mode 100644
index 000000000..95b4d1b0f
--- /dev/null
+++ b/ci/ansible/roles/kubeinit_ci/templates/echo-private-token.sh.j2
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo -n $(cat /home/gitlab-runner/.secrets/gitlab-private-token)
diff --git a/ci/ansible/roles/kubeinit_ci/templates/python-gitlab.cfg.j2 b/ci/ansible/roles/kubeinit_ci/templates/python-gitlab.cfg.j2
new file mode 100644
index 000000000..11a1f7008
--- /dev/null
+++ b/ci/ansible/roles/kubeinit_ci/templates/python-gitlab.cfg.j2
@@ -0,0 +1,6 @@
+[global]
+default = {{ kubeinit_inventory_ci_gitlab_name }}
+
+[{{ kubeinit_inventory_ci_gitlab_name }}]
+url = {{ kubeinit_inventory_ci_gitlab_url }}
+private_token = helper: /home/gitlab-runner/scripts/echo-private-token.sh
diff --git a/ci/ansible/roles/kubeinit_ci/templates/ssh-config.j2 b/ci/ansible/roles/kubeinit_ci/templates/ssh-config.j2
new file mode 100644
index 000000000..835c68148
--- /dev/null
+++ b/ci/ansible/roles/kubeinit_ci/templates/ssh-config.j2
@@ -0,0 +1,5 @@
+{% for host in groups['hypervisor_hosts'] | list %}
+Host {{ host }}
+  Hostname {{ hostvars[host].ssh_connection_address }}
+
+{% endfor %}
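Note how python-gitlab.cfg.j2 and echo-private-token.sh.j2 cooperate to keep
the GitLab token out of any rendered file: the `helper:` prefix tells
python-gitlab to execute the script and read the credential from its stdout.
A sketch of seeding the secret by hand (the token value is made up):

    mkdir -p /home/gitlab-runner/.secrets
    printf '%s' 'glpat-example0123456789' > /home/gitlab-runner/.secrets/gitlab-private-token
    chmod 0600 /home/gitlab-runner/.secrets/gitlab-private-token
    /home/gitlab-runner/scripts/echo-private-token.sh   # prints the token without a newline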
diff --git a/ci/gitlab_merge_request.sh b/ci/gitlab_merge_request.sh
new file mode 100755
index 000000000..e71537581
--- /dev/null
+++ b/ci/gitlab_merge_request.sh
@@ -0,0 +1,211 @@
+#!/bin/bash
+set -ex
+
+#############################################################################
+#                                                                           #
+# Copyright kubeinit contributors.                                          #
+#                                                                           #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may   #
+# not use this file except in compliance with the License. You may obtain   #
+# a copy of the License at:                                                 #
+#                                                                           #
+#   http://www.apache.org/licenses/LICENSE-2.0                              #
+#                                                                           #
+# Unless required by applicable law or agreed to in writing, software       #
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the  #
+# License for the specific language governing permissions and limitations   #
+# under the License.                                                        #
+#                                                                           #
+#############################################################################
+
+cluster_type=$1
+
+mr_iid=$CI_MERGE_REQUEST_IID
+project_id=$CI_MERGE_REQUEST_PROJECT_ID
+IFS=',' read -ra labels <<< "$CI_MERGE_REQUEST_LABELS"
+commit_sha=$CI_COMMIT_SHA
+
+KUBEINIT_RUNNER_HOST=$(cat ~/.gitlab-runner/runner-host)
+KUBEINIT_RUNNER_USER=$(cat ~/.gitlab-runner/runner-user)
+
+KUBEINIT_ANSIBLE_VERBOSITY="v"
+for label in "${labels[@]}"; do
+    echo $label
+    if [[ "$label" =~ verbosity=v+ ]]; then
+        echo "$label is a valid ansible verbosity"
+        IFS='=' read -ra params <<< "$label"
+        KUBEINIT_ANSIBLE_VERBOSITY=${params[1]}
+        break
+    else
+        echo "$label is an invalid ansible verbosity"
+    fi
+done
+
+KUBEINIT_SPEC_LABEL=""
+for label in "${labels[@]}"; do
+    echo $label
+    if [[ "$label" =~ [a-z0-9.]+-[a-z]+-[1-9]-[0-9]-[1-9]-[ch] ]]; then
+        echo "$label is a valid ci job label"
+        IFS='-' read -ra params <<< "$label"
+        DISTRO=${params[0]}
+        DRIVER=${params[1]}
+        CONTROLLERS=${params[2]}
+        COMPUTES=${params[3]}
+        HYPERVISORS=${params[4]}
+        RUN_MODE=${params[5]}
+        KUBEINIT_SPEC_LABEL=$label
+        break
+    else
+        echo "$label is an invalid ci job label"
+    fi
+done
+
+new_labels=()
+for label in "${labels[@]}"; do
+    if [[ "$label" != "$KUBEINIT_SPEC_LABEL" ]]; then
+        new_labels+=("$label")
+    fi
+done
+
+if [[ "${labels[@]}" != "${new_labels[@]}" ]]; then
+    gitlab project-merge-request update --project-id $project_id --iid $mr_iid --labels $(echo ${new_labels[@]} | tr ' ' ',')
+fi
+
+pwd
+
+# Install the collection
+cd kubeinit
+rm -rf ~/.ansible/collections/ansible_collections/kubeinit/kubeinit
+ansible-galaxy collection build -v --force --output-path releases/
+ansible-galaxy collection install --force --force-with-deps releases/kubeinit-kubeinit-`cat galaxy.yml | shyaml get-value version`.tar.gz
+cd ..
+
+cat ./kubeinit/inventory || true
+
+export ANSIBLE_CALLBACK_PLUGINS="$(python3 -m ara.setup.callback_plugins)"
+export ANSIBLE_ACTION_PLUGINS="$(python3 -m ara.setup.action_plugins)"
+export ANSIBLE_LOAD_CALLBACK_PLUGINS=true
+export ARA_API_CLIENT="http"
+export ARA_API_SERVER="http://127.0.0.1:8000"
+
+#
+# Install the CLI/agent
+#
+python3 -m pip install -r ./agent/requirements.txt
+KUBEINIT_REVISION="${revision:-ci}" python3 -m pip install --upgrade ./agent
+
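+# A worked example of the spec label parsing above, using the hypothetical
+# label "k8s-libvirt-1-2-1-h":
+#
+#   IFS='-' read -ra params <<< "k8s-libvirt-1-2-1-h"
+#   -> DISTRO=k8s DRIVER=libvirt CONTROLLERS=1 COMPUTES=2
+#      HYPERVISORS=1 RUN_MODE=h
+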
-f2 <<<"${DISTRO}")" + + FIRST_KUBEINIT_SPEC="${KUBEINIT_SPEC/${DISTRO}/${FIRST_DISTRO}}" + SECOND_KUBEINIT_SPEC="${KUBEINIT_SPEC/${DISTRO}/${SECOND_DISTRO}}" + KUBEINIT_SPEC="${FIRST_KUBEINIT_SPEC},${SECOND_KUBEINIT_SPEC}" + + # We enable two cluster ids in the inventory for both the cluster name and the network and + # add submariner to the post-deployment services + sed -i -e "/# cluster0/ s/# cluster0/${FIRST_DISTRO}cluster/" kubeinit/inventory + sed -i -e "/# cluster1/ s/# cluster1/${SECOND_DISTRO}cluster/" kubeinit/inventory + sed -i -e "/# kimgtnet/ s/# kimgtnet/kimgtnet/" kubeinit/inventory + + # We will enable only submariner in the + # case of having a multicluster deployment + # for okd.rke + if [[ "$DISTRO" == "okd.rke" ]]; then + sed -i -e "/kubeinit_inventory_post_deployment_services/ s/none/submariner/" kubeinit/inventory + fi +fi + +FAILED="0" +KUBEINIT_SPEC=${KUBEINIT_SPEC//,/ } +ARA_PLAYBOOK_NAME=ci-job-$CI_JOB_ID +ARA_PLAYBOOK_LABEL=ci_job_$CI_JOB_ID + +export > ../export_$ARA_PLAYBOOK_LABEL + +if [[ "$RUN_MODE" == "h" ]]; then + { + for SPEC in $KUBEINIT_SPEC; do + echo "(launch_e2e.sh) ==> Deploying ${SPEC}" + ansible-playbook \ + --user root \ + -${KUBEINIT_ANSIBLE_VERBOSITY:=v} \ + -i ./kubeinit/inventory \ + -e ara_playbook_name=${ARA_PLAYBOOK_NAME}-deployment \ + -e ara_playbook_labels=${ARA_PLAYBOOK_LABEL},${KUBEINIT_SPEC_LABEL},deployment \ + -e kubeinit_spec=${SPEC} \ + ./kubeinit/playbook.yml + done + } || { + echo "(launch_e2e.sh) ==> The deployment failed, we still need to run the cleanup tasks" + FAILED="1" + } + for SPEC in $KUBEINIT_SPEC; do + echo "(launch_e2e.sh) ==> Cleaning ${SPEC}" + ansible-playbook \ + --user root \ + -${KUBEINIT_ANSIBLE_VERBOSITY:=v} \ + -i ./kubeinit/inventory \ + -e ara_playbook_name=${ARA_PLAYBOOK_NAME}-cleanup \ + -e ara_playbook_labels=${ARA_PLAYBOOK_LABEL},${KUBEINIT_SPEC_LABEL},cleanup \ + -e kubeinit_spec=${SPEC} \ + -e kubeinit_stop_after_task=task-cleanup-hypervisors \ + ./kubeinit/playbook.yml + done +else + echo "(launch_e2e.sh) ==> The parameter launch from do not match a valid value [c|h]" + exit 1 +fi + +MR_RESULTS_DIR=/opt/output_data/$mr_iid +RESULTS_DIR=$MR_RESULTS_DIR/$CI_JOB_ID + +cat << EOF > ./ci/generate-ara-output.sh +#!/bin/bash + +set -x + +mkdir -p $MR_RESULTS_DIR +chown 999:999 $MR_RESULTS_DIR + +for id in \$(bash -c "comm -23 <(ara playbook list -f value -c id | sort) <(ara playbook list -f value -c id --label $ARA_PLAYBOOK_LABEL | sort)"); do + ara playbook delete \$id +done +ara-manage generate $RESULTS_DIR + +find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's/ARA Records Ansible/KubeInit job report/g' {} \; +find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's/ara.readthedocs.io/docs.kubeinit.org/g' {} \; +find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's#https://github.com/ansible-community/ara#https://github.com/kubeinit/kubeinit#g' {} \; +find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's#https://ara.recordsansible.org#https://kubeinit.org#g' {} \; +find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's#ARA Records Ansible and makes it easier to understand and troubleshoot. 
+cat << EOF > ./ci/generate-ara-output.sh
+#!/bin/bash
+
+set -x
+
+mkdir -p $MR_RESULTS_DIR
+chown 999:999 $MR_RESULTS_DIR
+
+for id in \$(bash -c "comm -23 <(ara playbook list -f value -c id | sort) <(ara playbook list -f value -c id --label $ARA_PLAYBOOK_LABEL | sort)"); do
+    ara playbook delete \$id
+done
+ara-manage generate $RESULTS_DIR
+
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's/ARA Records Ansible/KubeInit job report/g' {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's/ara.readthedocs.io/docs.kubeinit.org/g' {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's#https://github.com/ansible-community/ara#https://github.com/kubeinit/kubeinit#g' {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's#https://ara.recordsansible.org#https://kubeinit.org#g' {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's#ARA Records Ansible and makes it easier to understand and troubleshoot. It is another recursive acronym.#KubeInit helps with the deployment of multiple Kubernetes distributions.#g' {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's#ara is a free and open source project under the GPLv3 license.#The CI results are rendered using ARA#g' {} \;
+find $RESULTS_DIR -type f -exec sed -i -e 's#../static/images/logo.svg#https://raw.githubusercontent.com/Kubeinit/kubeinit/master/images/logo_white.svg#g' {} \;
+find $RESULTS_DIR -type f -exec sed -i -e 's#../static/images/favicon.ico#https://raw.githubusercontent.com/Kubeinit/kubeinit/master/images/favicon.ico#g' {} \;
+find $RESULTS_DIR -type f -exec sed -i -e 's#static/images/logo.svg#https://raw.githubusercontent.com/Kubeinit/kubeinit/master/images/logo_white.svg#g' {} \;
+find $RESULTS_DIR -type f -exec sed -i -e 's#static/images/favicon.ico#https://raw.githubusercontent.com/Kubeinit/kubeinit/master/images/favicon.ico#g' {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's#placeholderfororiginalurl#ara.recordsansible.org#g' {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -e 's#>ara #>KubeInit #g' {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -E -e "/href=\".+\">Playbooks/ s/href=\".+\"/href=\"https:\/\/storage.googleapis.com\/kubeinit-ci\/jobs\/${CI_JOB_ID}\/index.html\"/g" {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -E -e "/href=\".+\">Hosts/ s/href=\".+\"/href=\"https:\/\/storage.googleapis.com\/kubeinit-ci\/jobs\/${CI_JOB_ID}\/hosts\/index.html\"/g" {} \;
+find $RESULTS_DIR -type f -name '*.html' -exec sed -i -E -e "/class=\"navbar-brand\" href=\".*\">/ s/href=\".*\"/href=\"http:\/\/${KUBEINIT_RUNNER_HOST}:8080\/ara-output-data\/${mr_iid}\/${CI_JOB_ID}\/index.html\"/g" {} \;
+
+chown -R 999:999 $RESULTS_DIR
+EOF
+chmod +x ./ci/generate-ara-output.sh
+
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=accept-new $KUBEINIT_RUNNER_USER@$KUBEINIT_RUNNER_HOST podman exec kubeinit-ara-api $(pwd)/ci/generate-ara-output.sh
+
+gitlab project-merge-request-note create --project-id $project_id --mr-iid $mr_iid --body "Results for merge request job [#$CI_JOB_ID]($CI_JOB_URL) [$KUBEINIT_SPEC_LABEL](http://${KUBEINIT_RUNNER_HOST}:8080/ara-output-data/${mr_iid}/${CI_JOB_ID})."
+
+if [[ "$FAILED" == "1" ]]; then
+    echo "(gitlab_merge_request.sh) ==> The deployment command failed, this script must fail"
+    exit 1
+fi
+exit 0
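For reference, the multicluster branch near the top of the script fans a
single spec label out into two comma-separated specs via bash parameter
substitution; a worked example with an assumed okd.rke label:

    DISTRO="okd.rke"                                   # hypothetical label values
    KUBEINIT_SPEC="okd.rke-libvirt-1-1-1-h"
    FIRST_DISTRO="$(cut -d'.' -f1 <<<"${DISTRO}")"     # okd
    SECOND_DISTRO="$(cut -d'.' -f2 <<<"${DISTRO}")"    # rke
    echo "${KUBEINIT_SPEC/${DISTRO}/${FIRST_DISTRO}},${KUBEINIT_SPEC/${DISTRO}/${SECOND_DISTRO}}"
    # -> okd-libvirt-1-1-1-h,rke-libvirt-1-1-1-h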
-not -path "./.tox/*" -and -not -path "*molecule.yml" -and -path "*roles*.yml" | xargs ansible-lint' [testenv:flake8] basepython = python3 @@ -62,13 +62,13 @@ commands = envdir = {toxworkdir}/linters deps = {[testenv:linters]deps} commands = - bash -c 'find . -not -wholename "*/node_modules/*" -and -not -wholename "*.tox/*" -and -not -wholename "*.test/*" -and -name "*.sh" -print0 | xargs -0 bashate -v --ignore E006' + bash -c 'find . -not -path "./.tox/*" -and -not -wholename "*/node_modules/*" -and -not -wholename "*.test/*" -and -name "*.sh" -print0 | xargs -0 bashate -v --ignore E006' [testenv:yamllint] envdir = {toxworkdir}/linters deps = {[testenv:linters]deps} commands = - bash -c 'find . -not -wholename "*/node_modules/*" -and -not -wholename "*.tox/*" -and -name "*.yml" -print0 | xargs -0 yamllint' + bash -c 'find . -not -path "./.tox/*" -and -not -wholename "*/node_modules/*" -and -name "*.yml" -print0 | xargs -0 yamllint' [testenv:yamlfind] envdir = {toxworkdir}/linters From 86de5bc7dfca85a92f3dda19b96b3da467baf980 Mon Sep 17 00:00:00 2001 From: Glenn Marcy Date: Thu, 23 Dec 2021 01:17:27 -0500 Subject: [PATCH 3/5] fix: handle when firewalld is not active --- kubeinit/roles/kubeinit_libvirt/tasks/create_network.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/create_network.yml b/kubeinit/roles/kubeinit_libvirt/tasks/create_network.yml index 67a1b6489..c68601149 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/create_network.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/create_network.yml @@ -157,6 +157,7 @@ loop: "{{ _result_firewalld_info.results }}" vars: default_zone_info: "{{ item.firewalld_info.zones[item.firewalld_info.default_zone] }}" + when: item.firewalld_info is defined - name: Refresh firewalld services list to pick up ovn services ansible.builtin.command: | From bc046498dc2bda3ae6fa3c8fce5908dc16ba76b8 Mon Sep 17 00:00:00 2001 From: Glenn Marcy Date: Thu, 23 Dec 2021 23:28:01 -0500 Subject: [PATCH 4/5] fix: fixes to Dockerfile and centos prereqs This commit updates the Dockerfile to - Set the permission of the /root/.ssh folder to 0700 - Align the .ssh/config contents with the ansible ssh common args Also add jq to the package prereqs for Centos family hosts --- Dockerfile | 5 +++-- kubeinit/roles/kubeinit_libvirt/defaults/main.yml | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 2375271b7..44e3cdb61 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,9 +14,10 @@ RUN set -x && \ \ echo "==> Setting up ssh options..." && \ mkdir /root/.ssh && \ + chmod 0700 /root/.ssh && \ echo "Host *" >> /root/.ssh/config && \ - echo " StrictHostKeyChecking no" >> /root/.ssh/config && \ - echo " IdentityFile /root/.ssh/id_rsa" >> /root/.ssh/config && \ + echo " UserKnownHostsFile=/dev/null" >> /root/.ssh/config && \ + echo " StrictHostKeyChecking accept-new" >> /root/.ssh/config && \ \ echo "==> Adding Python runtime and deps..." 
From bc046498dc2bda3ae6fa3c8fce5908dc16ba76b8 Mon Sep 17 00:00:00 2001
From: Glenn Marcy
Date: Thu, 23 Dec 2021 23:28:01 -0500
Subject: [PATCH 4/5] fix: fixes to Dockerfile and centos prereqs

This commit updates the Dockerfile to

- Set the permission of the /root/.ssh folder to 0700
- Align the .ssh/config contents with the ansible ssh common args

Also add jq to the package prereqs for CentOS family hosts
---
 Dockerfile                                        | 5 +++--
 kubeinit/roles/kubeinit_libvirt/defaults/main.yml | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 2375271b7..44e3cdb61 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -14,9 +14,10 @@ RUN set -x && \
     \
     echo "==> Setting up ssh options..." && \
     mkdir /root/.ssh && \
+    chmod 0700 /root/.ssh && \
    echo "Host *" >> /root/.ssh/config && \
-    echo "  StrictHostKeyChecking no" >> /root/.ssh/config && \
-    echo "  IdentityFile /root/.ssh/id_rsa" >> /root/.ssh/config && \
+    echo "  UserKnownHostsFile=/dev/null" >> /root/.ssh/config && \
+    echo "  StrictHostKeyChecking accept-new" >> /root/.ssh/config && \
     \
     echo "==> Adding Python runtime and deps..." && \
     python3 -m pip install --upgrade --ignore-installed PyYAML && \
diff --git a/kubeinit/roles/kubeinit_libvirt/defaults/main.yml b/kubeinit/roles/kubeinit_libvirt/defaults/main.yml
index 42ae07255..ae6552228 100644
--- a/kubeinit/roles/kubeinit_libvirt/defaults/main.yml
+++ b/kubeinit/roles/kubeinit_libvirt/defaults/main.yml
@@ -98,6 +98,7 @@ kubeinit_libvirt_hypervisor_dependencies:
     - net-tools
     - xz
     - perl-XML-XPath
+    - jq
   debian:
     - sudo
     - numad

From f6881a3958a1576f9d468c9023b324e1913d490a Mon Sep 17 00:00:00 2001
From: Glenn Marcy
Date: Fri, 24 Dec 2021 01:15:48 -0500
Subject: [PATCH 5/5] fix: external ingress fedora support

---
 .../kubeinit_bind/templates/create-external-ingress.sh.j2 | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kubeinit/roles/kubeinit_bind/templates/create-external-ingress.sh.j2 b/kubeinit/roles/kubeinit_bind/templates/create-external-ingress.sh.j2
index 810246f78..ca097182f 100644
--- a/kubeinit/roles/kubeinit_bind/templates/create-external-ingress.sh.j2
+++ b/kubeinit/roles/kubeinit_bind/templates/create-external-ingress.sh.j2
@@ -12,7 +12,7 @@ KUBEINIT_INGRESS_IP=$(ip route get "8.8.8.8" | grep -Po '(?<=(src )).*(?= uid)')
 KUBEINIT_HAPROXY_IP={{ kubeinit_haproxy_service_address }}
 
 # Install buildah and podman
-if [ "$ID" == "centos" ]; then
+if [ "$ID" == "centos" -o "$ID" == "fedora" ]; then
     dnf install -y buildah podman
 else
     apt update
@@ -97,7 +97,7 @@ if podman pod exists {{ kubeinit_cluster_name }}-ingress-pod; then podman pod rm
 podman pod create --name {{ kubeinit_cluster_name }}-ingress-pod --dns ${KUBEINIT_INGRESS_IP} --dns 8.8.8.8 --dns-search {{ kubeinit_cluster_fqdn }}
 
 # Use overlay for .ssh folder when available
-if [ "$ID" == "centos" ]; then
+if [ "$ID" == "centos" -o "$ID" == "fedora" ]; then
     overlay=O
 else
     overlay=ro
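
A final style note on the template change above: the `-o` operator inside
single-bracket test is marked obsolescent by POSIX, and since these templates
already run under bash, an equivalent spelling would be:

    if [[ "$ID" == "centos" || "$ID" == "fedora" ]]; then
        dnf install -y buildah podman
    fi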