# platform2024_vm_deploy_docker_elk.yml
---
- name: Simple VM deploy of a single-node Docker host  # edit the vmname variable; use an -l limit to target specific clusters vs. the full inventory
hosts: edge
vars:
    vmname: docker-  # a random 5-character suffix will be appended
image_url:
- "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
image_path: "/Users/davedemlow/tmp/" #~/tmp/" #path to download file
connection: local
gather_facts: false
  strategy: host_pinned  # or "free"; allows each cluster to start the next task before all clusters have finished the current one
  environment:  # set here so hypercore modules automatically use it for each remote cluster, avoiding the need to specify cluster_instance on every task
SC_HOST: "https://{{ inventory_hostname }}"
SC_USERNAME: "{{ scale_user | default('admin') }}"
SC_PASSWORD: "{{ scale_pass | default('admin') }}"
roles:
- url2template
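  # The url2template role (assumed to come from this repo) downloads image_url
  # and registers it as a HyperCore template VM, setting the image_name
  # variable that vm_clone references below.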
tasks:
    - name: Generate a random 5-character string (lowercase letters and digits) via the password lookup and set it as a fact named ran5
ansible.builtin.set_fact:
ran5: "{{ lookup('password', '/dev/null chars=ascii_lowercase,digits length=5') }}"
    - name: Clone and configure ad hoc VM "{{ vmname }}{{ ran5 }}"
scale_computing.hypercore.vm_clone:
vm_name: "{{ vmname }}{{ ran5 }}"
source_vm_name: "{{ image_name }}"
tags:
- platform2024
cloud_init:
user_data: |
#cloud-config
password: "password"
chpasswd: { expire: False }
ssh_pwauth: True
            ssh_authorized_keys:  # add your SSH public key for public-key authentication
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDihWWhjoPj8KVLtdLDwNJQ71zi9An0iUFjefRWu2Eju [email protected]
disable_root: false # allow ssh root login
ssh_import_id: gh:ddemlow
            apt:
              sources:
                docker.list:
                  source: 'deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable'
                  keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
package_update: true
package_upgrade: false
            packages: [qemu-guest-agent, docker-ce, docker-ce-cli, docker-compose]
bootcmd:
              - [ sh, -c, 'echo GRUB_CMDLINE_LINUX="nomodeset" >> /etc/default/grub' ]
              - [ sh, -c, 'echo GRUB_GFXPAYLOAD_LINUX="1024x768" >> /etc/default/grub' ]
              - [ sh, -c, 'echo GRUB_DISABLE_LINUX_UUID=true >> /etc/default/grub' ]
- [ sh, -c, 'sudo update-grub' ]
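            # NOTE: bootcmd runs early on *every* boot, so these appends can
            # accumulate duplicate lines in /etc/default/grub over reboots;
            # the GRUB changes take effect only after update-grub and a reboot.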
runcmd:
- sh -c '/usr/sbin/usermod -aG docker ubuntu'
              - sh -c 'systemctl restart --no-block qemu-guest-agent'
- sh -c 'cd /home/ubuntu/elk && /usr/bin/docker-compose up -d'
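            # runcmd runs once at the end of first boot; the compose file it
            # brings up is created beforehand by write_files below (cloud-init
            # writes files before runcmd executes).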
write_files:
- path: /home/ubuntu/elk/docker-compose.yaml
content: |
services:
elasticsearch:
image: elasticsearch:7.16.1
container_name: es
environment:
discovery.type: single-node
ES_JAVA_OPTS: "-Xms512m -Xmx512m"
ports:
- "9200:9200"
- "9300:9300"
healthcheck:
test: ["CMD-SHELL", "curl --silent --fail support-tunnel-legacy:9200/_cluster/health || exit 1"]
interval: 10s
timeout: 10s
retries: 3
networks:
- elastic
logstash:
image: logstash:7.16.1
container_name: log
environment:
discovery.seed_hosts: logstash
LS_JAVA_OPTS: "-Xms512m -Xmx512m"
volumes:
- ./logstash/pipeline/logstash-nginx.config:/usr/share/logstash/pipeline/logstash-nginx.config
- ./logstash/nginx.log:/home/nginx.log
ports:
- "5000:5000/tcp"
- "5000:5000/udp"
- "5044:5044"
- "9600:9600"
depends_on:
- elasticsearch
networks:
- elastic
command: logstash -f /usr/share/logstash/pipeline/logstash-nginx.config
kibana:
image: kibana:7.16.1
container_name: kib
ports:
- "5601:5601"
depends_on:
- elasticsearch
networks:
- elastic
networks:
elastic:
driver: bridge
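                  # Kibana UI: http://<vm-ip>:5601; Elasticsearch API on :9200.
                  # (The uiport___5601 VM tag set later presumably surfaces
                  # port 5601 in the HyperCore UI.)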
- path: /etc/systemd/system/[email protected]/override.conf
content: |
[Service]
# Override [email protected] for tty1 to enable autologin
ExecStart=
ExecStart=-/sbin/agetty --autologin ubuntu --noclear %I $TERM
permissions: '0644'
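              # This override enables console autologin for the ubuntu user on
              # tty1, which is convenient when debugging from the HyperCore VM
              # console.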
final_message: |
cloud-init has finished
version: $version
timestamp: $timestamp
datasource: $datasource
uptime: $uptime
meta_data: |
dsmode: local
            local-hostname: "{{ vmname }}{{ ran5 }}"
    - name: Disk desired configuration for "{{ vmname }}{{ ran5 }}"
      scale_computing.hypercore.vm_disk:
        vm_name: "{{ vmname }}{{ ran5 }}"
items:
- disk_slot: 0
type: virtio_disk
size: "{{ '300 GB' | human_to_bytes }}" # 50GB | human to bytes results in 53.7GB VSD in Hypercore
state: present
    - name: VM desired configuration and state for "{{ vmname }}{{ ran5 }}"
      scale_computing.hypercore.vm_params:
        vm_name: "{{ vmname }}{{ ran5 }}"
memory: "{{ '4 GB' | human_to_bytes }}"
description:
tags:
- platform2024
- uiport___5601
- "{{ site_name }}"
          - ansible_group__docker  # creates a tag the hypercore inventory plugin uses to group these VM hosts in later plays
- SERIAL
vcpu: 8
power_state: start
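
# Example invocation (values are illustrative; "edge" is the inventory group of
# HyperCore clusters this playbook expects):
#   ansible-playbook platform2024_vm_deploy_docker_elk.yml \
#     -l <cluster_or_group> -e scale_user=admin -e scale_pass=<password>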