---
# defaults file for azure_infra
state: present
location: eastus
rg: "test"
#######################
# nodes
# loops through each list to create vm-1, vm-2, vm-3
#######################
master_nodes: [1,2,3]
infra_nodes: [1,2,3]
app_nodes: [1,2,3]
# Minimum of 3 required; use odd numbers only for CNS
cns_nodes: [1,2,3]
#######################
# VM sizes (adjust to your environment)
# List available sizes with: az vm list-sizes -l <region> -o table
#######################
# For a cheaper solution when testing
#vm_size_master: Standard_E2_v3
#vm_size_infra: Standard_E4_v3
#vm_size_node: Standard_E2_v3
#vm_size_cns: Standard_E4_v3
vm_size_master: Standard_D4s_v3
vm_size_infra: Standard_D8s_v3
vm_size_node: Standard_D2s_v3
vm_size_cns: Standard_D8s_v3
vm_size_bastion: Standard_D1
#######################
# SSH user for all VMs created
#######################
admin_user: cloud-user
# This will be added to each instance's authorized_keys file
admin_pubkey: '**add key here**'
# Path to the private key; it will be added to the localhost ssh_config for proxying through the bastion
admin_privkey: '~/.ssh/id_rsa'
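# A hedged example of producing a key pair for this purpose (paths are
# illustrative; any existing key works just as well):
#   ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa
#   cat ~/.ssh/id_rsa.pub    # paste the output into admin_pubkey above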
# If set, creates local users with password-less sudo on the bastion.
#bastion_users:
# user1: '**add key here**'
# user2: 'ssh-rsa FDLKJDFLDKJDFLDKJ'
#######################
# Service Principal
#######################
# Used to template the Azure cloud config file on each node
sp_name: devops
# Place this in a vars file in the root of the play that can be encrypted (e.g. with ansible-vault) and is not checked into SCM
sp_secret: 'password'
sp_app_id: 'dfdffdfa0103'
#######################
# Azure tenant and subscription IDs
#######################
tenant_id: 'azure tenant id'
subscription_id: 'azure subscription id'
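# A sketch of gathering these values with the Azure CLI; the SP name, role and
# scope below are assumptions, adjust them to your environment:
#   az ad sp create-for-rbac -n devops --role Contributor --scopes /subscriptions/<subscription_id>
#       # prints appId (sp_app_id), password (sp_secret) and tenant (tenant_id)
#   az account show --query id -o tsv    # prints the subscription id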
#######################
# registry.redhat.io
#######################
# Create a service account at https://access.redhat.com/terms-based-registry/
# and add its username/password below
registry_io_user: 'user'
registry_io_user_pswd: 'password'
#######################
# RHSM
# Username / Password or
# Activation key / OrgId
#######################
rhsm_user: ''
rhsm_pass: ''
rhsm_key: ""
rhsm_org: ""
# Separate pools can be specified so that only application nodes consume paid
# subscriptions; use the same pool ID for both if you only have one pool
rhsm_broker_pool: ""
rhsm_node_pool: ""
# For OpenShift Container Storage
rhsm_ocs_pool: ""
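# One way to find pool IDs on a registered RHEL system (the filter is illustrative):
#   subscription-manager list --available --matches 'OpenShift*' | grep -i 'pool id'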
rhsm_repos:
- rhel-7-server-rpms
- rhel-7-server-extras-rpms
- rhel-7-server-ose-3.11-rpms
- rhel-7-fast-datapath-rpms
- rhel-7-server-ansible-2.6-rpms
bastion_pkgs:
- ansible
- atomic-openshift-clients
- git
- tmux
- screen
#######################
# Network
#######################
vnet_name: "{{ rg }}vnet"
vnet_cidr: "192.168.0.0/16"
master_subnet_cidr: "192.168.0.0/27"
infra_subnet_cidr: "192.168.0.32/27"
cns_subnet_cidr: "192.168.0.64/27"
app_subnet_cidr: "192.168.0.128/25"
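# For reference: all of the subnets above sit inside vnet_cidr; each /27 provides
# 32 addresses (masters, infra, CNS) and the /25 provides 128 addresses for app nodes.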
api_port: 443
# If this is set, the bastion host is configured with the given static
# IP address. A public IP address is still assigned to the bastion
# because it is required during installation, but can be removed in
# Azure after the installation is done. This is useful in environments
# where the VNet is accessible from a private network.
#bastion_private_ip: 192.168.0.20
# Override the defaults for the SDN cluster network and the services
# network.
#osm_cluster_network_cidr: 10.29.0.0/16
#openshift_portal_net: 10.28.0.0/16
# If this is set to true, the VNet in the given resource group is not
# deleted by the destroy.yml playbook. This can be used in
# environments where a peering agreement or Express Route exists for
# the given resource group and was configured before the OpenShift
# installation.
vnet_preserve: false
#######################
# LB
#######################
# DNS name for the master load balancer. If not set,
# {{ rg }}.{{ location }}.cloudapp.azure.com will be used.
#master_lb_dns: ""
# If this is set, the master load balancer will be configured as an
# internal Azure load balancer with the given IP address as its
# frontend IP address. There will be no public IP address assigned to
# the load balancer. This is useful in environments where the VNet is
# accessible from a private network.
#
# If this is set, the master_lb_dns parameter also needs to be defined
# and point to this IP address in DNS.
#master_lb_private_ip: 192.168.0.21
# If this is set, Azure creates a DNS record for the public router IP
# address. The public router IP will change if the environment is
# deleted and redeployed, but the DNS name will stay the same.
#
# This can be referenced in a CNAME record in an external DNS
# server. The full DNS name is:
#
# {{ router_lb_dns_label }}.{{ location }}.cloudapp.azure.com
#
#router_lb_dns_label: ocpapplb1
# If this is set, application routes use this domain by default
# instead of nip.io. This usually requires router_lb_dns_label to be
# set, and a wildcard DNS entry to exist for *.app.example.com.
#router_lb_default_subdomain: app.example.com
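# Example wildcard record in an external DNS zone (names are illustrative and
# assume the router_lb_dns_label above):
#   *.app.example.com.  CNAME  ocpapplb1.eastus.cloudapp.azure.com.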
# Registry storage account name. Azure storage account names allow only lowercase
# letters and numbers (3-24 chars), hence dashes are stripped from the resource group name.
registry_storage_account: "{{ rg | regex_replace('-') }}"
###########################
# DNS
###########################
# Optional dictionary of DNS domains and nameservers to configure on
# the bastion and each node. DNS lookups for the given domains will be
# done against the given nameservers. This can be used in environments
# where a peering agreement or Express Route exists for the given
# resource group
#
#dns_domain_nameservers:
# internal.example.com: 192.168.100.1
#######################
# Custom Named Certs
#######################
# Router
#router_cert: '{"cafile": "/path/to/ca_cert", "certfile": "/path/to/fullchain.cer", "keyfile": "/vagrant/keys/domain.key"}'
## Master
#master_cert: '[{"cafile": "/path/to/ca_cert", "certfile": "/path/to/fullchain.cer", "keyfile": "/path/to/key/domain.key", "names": ["openshift.console.domain.name"]}]'
#######################
# OCP Identity
# Use 'htpasswd -n <user>' to generate password hash. (htpasswd from httpd-tools RPM)
#######################
# Example with admin:changeme
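# A hash like the one below can be generated with, e.g.: htpasswd -nb admin changeme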
openshift_master_htpasswd_users: {'admin': '$apr1$zAhyA9Ko$rBxBOwAwwtRuuaw8OtCwH0'}
openshift_master_identity_providers: [{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
#
#ocp_admin_passwd: 'redhat'
#######################
# deploy CNS
#######################
deploy_cns: true
deploy_metrics: true
deploy_logging: true
#deploy_prometheus: true
# volume sizing defaults
metrics_volume_size: 25
logging_volume_size: 100
#prometheus_volume_size: 25
# OpenShift Container StorageClass
# Sizes in Gi
# Infra usable storage needs to be greater than the calculation below:
# ((logging_volume_size * number of infra nodes) + metrics_volume_size + prometheus_volume_size) * 1.3 + registry_volume_size (if using OCS for the registry)
ocs_infra_cluster_allocated_storage: "{{ ((( logging_volume_size|int * infra_nodes|length ) + metrics_volume_size|int + (prometheus_volume_size | default(0) | int) ) * 1.3 ) | round | int }}"
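# Worked example, assuming prometheus_volume_size is uncommented at 25:
#   ((100 * 3) + 25 + 25) * 1.3 = 455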
ocs_infra_cluster_usable_storage: 512
ocs_app_cluster_usable_storage: 100
#######################
# UI customization
#######################
#project_request_message: To create a new project, contact your team administrator.
#######################
# Don't change
#
# deploy_cns_on_infra: true ... no separate Gluster (CNS) VMs are created;
# CNS runs on the infra nodes
#######################
bastion: "{{ rg }}b.{{ location }}.cloudapp.azure.com"
deploy_cns_on_infra: true