From bd0023aea5332814431e5c8821c7f28afefd5896 Mon Sep 17 00:00:00 2001
From: Sergei Petrosian

Contents
ha_cluster_cluster_present
ha_cluster_start_on_boot
ha_cluster_install_cloud_agents
ha_cluster_fence_agent_packages
ha_cluster_sbd_enabled
ha_cluster_sbd_options
ha_cluster_cluster_properties
ha_cluster_node_options
ha_cluster_cluster_properties
ha_cluster_resource_primitives
ha_cluster_constraints_order
ha_cluster_constraints_ticket
ha_cluster_acls
ha_cluster_qnetd
Configuring cluster to use SBD
Limitations
Requirements
@@ -410,6 +421,17 @@ Defined in
start on boot. If set to
false
, cluster services will be
configured not to start on boot.
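As a sketch of how this is typically set (host names and the cluster name below are placeholders, and the play layout follows the examples later in this document):
- name: Configure cluster services not to start on boot
  hosts: node1 node2
  vars:
    ha_cluster_cluster_name: my-new-cluster
    ha_cluster_hacluster_password: password
    ha_cluster_start_on_boot: false
  roles:
    - linux-system-roles.ha_cluster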
ha_cluster_install_cloud_agents
boolean, default: false
The role automatically installs needed HA Cluster packages. However, resource and fence agents for cloud environments are not installed by default on RHEL. If you need those to be installed, set this variable to true. Alternatively, you can specify those packages in the ha_cluster_fence_agent_packages and ha_cluster_extra_packages variables.
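For instance, a minimal vars sketch enabling the cloud agents (the surrounding play is assumed to look like the examples later in this document):
  vars:
    ha_cluster_cluster_name: my-new-cluster
    ha_cluster_hacluster_password: password
    # install resource and fence agents for cloud environments
    ha_cluster_install_cloud_agents: true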
ha_cluster_fence_agent_packages
list of fence agent packages to install, default: fence-agents-all, fence-virt
@@ -419,9 +441,9 @@
This variable can be used to install additional packages not installed automatically by the role, for example custom resource agents.
It is possible to specify fence agents here as well. However, ha_cluster_fence_agent_packages is preferred for that, so that its default value is overridden.
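As an illustration, fence agent and extra packages might be set like this (the custom agent package name is a placeholder, not a real package):
  vars:
    # overrides the default list: fence-agents-all, fence-virt
    ha_cluster_fence_agent_packages:
      - fence-agents-all
      - fence-virt
    # installed in addition to packages the role installs automatically
    ha_cluster_extra_packages:
      - my-custom-resource-agents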
ha_cluster_hacluster_password
string, no default - must be specified
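A sketch of supplying the password; in real playbooks it is advisable to protect the value, for example with Ansible Vault:
  vars:
    ha_cluster_cluster_name: my-new-cluster
    ha_cluster_hacluster_password: password  # use a vault-encrypted value in practice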
@@ -755,57 +777,114 @@ ha_cluster_sbd_options
You may take a look at an example.
-Watchdog and SBD devices are configured on a node to node basis in inventory.
-ha_cluster_cluster_properties
structure, default: no properties
-ha_cluster_cluster_properties:
- - attrs:
- - name: property1_name
- value: property1_value
- - name: property2_name
- value: property2_value
List of sets of cluster properties - Pacemaker cluster-wide configuration. Currently, only one set is supported.
You may take a look at an example.
Watchdog and SBD devices can be configured on a node to node basis in two variables:
ha_cluster_node_options is a single variable expected to have the same value for all cluster nodes. It is a list of dictionaries, each dictionary defines options for one node.
ha_cluster dictionary defines options for one node only. To set different values for each node, you define the variable separately for each node.
ha_cluster_node_options
structure, default: no node options
-ha_cluster_node_options:
- - node_name: node1
- attributes:
- - attrs:
- - name: attribute1
- value: value1_node1
- - name: attribute2
- value: value2_node1
- - node_name: node2
- attributes:
- - attrs:
- - name: attribute1
- value: value1_node2
- - name: attribute2
- value: value2_node2
ha_cluster_node_options:
+ - node_name: node1
+ pcs_address: node1-address
+ corosync_addresses:
+ - 192.168.1.11
+ - 192.168.2.11
+ sbd_watchdog_modules:
+ - module1
+ - module2
+ sbd_watchdog_modules_blocklist:
+ - module3
+ sbd_watchdog: /dev/watchdog2
+ sbd_devices:
+ - /dev/vdx
+ - /dev/vdy
+ attributes:
+ - attrs:
+ - name: attribute1
+ value: value1_node1
+ - name: attribute2
+ value: value2_node1
+ - node_name: node2
+ pcs_address: node2-address:2224
+ corosync_addresses:
+ - 192.168.1.12
+ - 192.168.2.12
+ sbd_watchdog_modules:
+ - module1
+ sbd_watchdog_modules_blocklist:
+ - module3
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/vdw
+ - /dev/vdz
+ attributes:
+ - attrs:
+ - name: attribute1
+ value: value1_node2
+ - name: attribute2
+ value: value2_node2
This variable defines various settings which vary from cluster node to cluster node.
Note: Use an inventory or playbook hosts to specify which nodes form the cluster. This variable merely sets options for the specified nodes.
The items are as follows:
node_name (mandatory) - Node name. It must match a name defined for a node. See also ha_cluster.node_name.
pcs_address (optional) - Address used by pcs to communicate with the node, it can be a name, a FQDN or an IP address. Port can be specified as well.
corosync_addresses (optional) - List of addresses used by Corosync, all nodes must have the same number of addresses and the order of the addresses matters.
sbd_watchdog_modules (optional) - Watchdog kernel modules to be loaded (creates /dev/watchdog* devices). Defaults to empty list if not set.
sbd_watchdog_modules_blocklist (optional) - Watchdog kernel modules to be unloaded and blocked. Defaults to empty list if not set.
sbd_watchdog (optional) - Watchdog device to be used by SBD. Defaults to /dev/watchdog if not set.
sbd_devices (optional) - Devices to use for exchanging SBD messages and for monitoring. Defaults to empty list if not set.
attributes (optional) - List of sets of Pacemaker node attributes for the node. Currently, no more than one set for each node is supported.
You may take a look at examples:
ha_cluster_cluster_properties
structure, default: no properties
+ha_cluster_cluster_properties:
+ - attrs:
+ - name: property1_name
+ value: property1_value
+ - name: property2_name
+ value: property2_value
List of sets of cluster properties - Pacemaker cluster-wide configuration. Currently, only one set is supported.
You may take a look at an example.
+ha_cluster_resource_primitives
structure, default: no resources
ha_cluster_sbd_options
You may take a look at an example.
ha_cluster_acls
structure, default: no ACLs
ha_cluster_acls:
  acl_roles:
    - id: role-id-1
      description: role description
      permissions:
        - kind: access-type
          xpath: XPath expression
        - kind: access-type
          reference: cib-element-id
    - id: role-id-2
      permissions:
        - kind: access-type
          xpath: XPath expression
  acl_users:
    - id: user-name
      roles:
        - role-id-1
        - role-id-2
  acl_groups:
    - id: group-name
      roles:
        - role-id-2
This variable defines ACL roles, users and groups.
The items of acl_roles are as follows:
id (mandatory) - ID of an ACL role.
description (optional) - Description of the ACL role.
permissions (optional) - List of ACL role permissions.
kind (mandatory) - The access being granted. Allowed values are read, write, and deny.
xpath (optional) - An XPath specification selecting an XML element in the CIB to which the permission applies. It is mandatory to specify exactly one of the items: xpath or reference.
reference (optional) - The ID of an XML element in the CIB to which the permission applies. It is mandatory to specify exactly one of the items: xpath or reference. Note: the ID must exist.
The items of acl_users are as follows:
id (mandatory) - ID of an ACL user.
roles (optional) - List of ACL role IDs assigned to the user.
The items of acl_groups are as follows:
id (mandatory) - ID of an ACL group.
roles (optional) - List of ACL role IDs assigned to the group.
Note: Configure cluster property enable-acl to enable ACLs in the cluster:
ha_cluster_cluster_properties:
+ - attrs:
+ - name: enable-acl
+ value: 'true'
You may take a look at an example.
ha_cluster_qnetd
structure and default value:
-ha_cluster_qnetd:
- present: boolean
- start_on_boot: boolean
- regenerate_keys: boolean
ha_cluster_qnetd:
+ present: boolean
+ start_on_boot: boolean
+ regenerate_keys: boolean
This configures a qnetd host which can then serve as an external quorum device for clusters. The items are as follows:
ha_cluster_qnetd
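A minimal sketch of the structure with concrete values; the meaning of each key is inferred from its name and from the qnetd examples later in this document:
ha_cluster_qnetd:
  present: true          # set this host up as a qnetd host
  start_on_boot: true    # start the qnetd service on boot
  regenerate_keys: false # keep any existing keys and certificates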
Nodes' names and addresses can be configured in the ha_cluster variable, for example in inventory. This is optional. Addresses configured in ha_cluster_node_options override those configured in ha_cluster. If no names or addresses are configured, play's targets will be used.
Example inventory with targets node1
and
node2
:
all:
- hosts:
- node1:
- ha_cluster:
- node_name: node-A
- pcs_address: node1-address
- corosync_addresses:
- - 192.168.1.11
- - 192.168.2.11
- node2:
- ha_cluster:
- node_name: node-B
- pcs_address: node2-address:2224
- corosync_addresses:
- - 192.168.1.12
- - 192.168.2.12
all:
+ hosts:
+ node1:
+ ha_cluster:
+ node_name: node-A
+ pcs_address: node1-address
+ corosync_addresses:
+ - 192.168.1.11
+ - 192.168.2.11
+ node2:
+ ha_cluster:
+ node_name: node-B
+ pcs_address: node2-address:2224
+ corosync_addresses:
+ - 192.168.1.12
+ - 192.168.2.12
node_name - the name of a node in a cluster
pcs_address - an address used by pcs to communicate
@@ -1544,35 +1693,38 @@
When using SBD, you may optionally configure watchdog and SBD devices for each node in the ha_cluster variable, for example in inventory. Even though all SBD devices must be shared to and accessible from all nodes, each node may use different names for the devices. The loaded watchdog modules and used devices may also be different for each node. SBD settings defined in ha_cluster_node_options override those defined in ha_cluster. See also SBD variables.
Example inventory with targets node1
and
node2
:
all:
- hosts:
- node1:
- ha_cluster:
- sbd_watchdog_modules:
- - module1
- - module2
- sbd_watchdog: /dev/watchdog2
- sbd_devices:
- - /dev/vdx
- - /dev/vdy
- node2:
- ha_cluster:
- sbd_watchdog_modules:
- - module1
- sbd_watchdog_modules_blocklist:
- - module2
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/vdw
- - /dev/vdz
all:
+ hosts:
+ node1:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - module1
+ - module2
+ sbd_watchdog: /dev/watchdog2
+ sbd_devices:
+ - /dev/vdx
+ - /dev/vdy
+ node2:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - module1
+ sbd_watchdog_modules_blocklist:
+ - module2
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/vdw
+ - /dev/vdz
sbd_watchdog_modules
(optional) - Watchdog kernel
modules to be loaded (creates /dev/watchdog*
devices).
@@ -1597,593 +1749,697 @@
true in your playbooks using the ha_cluster role.
-- name: Manage HA cluster and firewall and selinux
- hosts: node1 node2
- vars:
- ha_cluster_manage_firewall: true
- ha_cluster_manage_selinux: true
-
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster and firewall and selinux
+ hosts: node1 node2
+ vars:
+ ha_cluster_manage_firewall: true
+ ha_cluster_manage_selinux: true
+
+ roles:
+ - linux-system-roles.ha_cluster
certificate role
This example creates self-signed pcsd certificate and private key files in /var/lib/pcsd with the file name FILENAME.crt and FILENAME.key, respectively.
-- name: Manage HA cluster with certificates
- hosts: node1 node2
- vars:
- ha_cluster_pcsd_certificates:
- - name: FILENAME
- common_name: "{{ ansible_hostname }}"
- ca: self-sign
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster with certificates
+ hosts: node1 node2
+ vars:
+ ha_cluster_pcsd_certificates:
+ - name: FILENAME
+ common_name: "{{ ansible_hostname }}"
+ ca: self-sign
+ roles:
+ - linux-system-roles.ha_cluster
- name: Manage HA cluster with no resources
- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
-
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster with no resources
+ hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+
+ roles:
+ - linux-system-roles.ha_cluster
- name: Manage HA cluster with Corosync options
- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_transport:
- type: knet
- options:
- - name: ip_version
- value: ipv4-6
- - name: link_mode
- value: active
- links:
- -
- - name: linknumber
- value: 1
- - name: link_priority
- value: 5
- -
- - name: linknumber
- value: 0
- - name: link_priority
- value: 10
- compression:
- - name: level
- value: 5
- - name: model
- value: zlib
- crypto:
- - name: cipher
- value: none
- - name: hash
- value: none
- ha_cluster_totem:
- options:
- - name: block_unlisted_ips
- value: 'yes'
- - name: send_join
- value: 0
- ha_cluster_quorum:
- options:
- - name: auto_tie_breaker
- value: 1
- - name: wait_for_all
- value: 1
-
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster with Corosync options
+ hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ ha_cluster_transport:
+ type: knet
+ options:
+ - name: ip_version
+ value: ipv4-6
+ - name: link_mode
+ value: active
+ links:
+ -
+ - name: linknumber
+ value: 1
+ - name: link_priority
+ value: 5
+ -
+ - name: linknumber
+ value: 0
+ - name: link_priority
+ value: 10
+ compression:
+ - name: level
+ value: 5
+ - name: model
+ value: zlib
+ crypto:
+ - name: cipher
+ value: none
+ - name: hash
+ value: none
+ ha_cluster_totem:
+ options:
+ - name: block_unlisted_ips
+ value: 'yes'
+ - name: send_join
+ value: 0
+ ha_cluster_quorum:
+ options:
+ - name: auto_tie_breaker
+ value: 1
+ - name: wait_for_all
+ value: 1
+
+ roles:
+ - linux-system-roles.ha_cluster
These variables need to be set in inventory or via host_vars. Of course the SBD kernel modules and device path might differ depending on your setup.
all:
- hosts:
- node1:
- ha_cluster:
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/vdx
- - /dev/vdy
- - /dev/vdz
- node2:
- ha_cluster:
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/vdx
- - /dev/vdy
- - /dev/vdz
After setting the inventory correctly, use this playbook to configure a complete SBD setup including loading watchdog modules and creating the SBD stonith resource.
-- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_sbd_enabled: true
- ha_cluster_sbd_options:
- - name: delay-start
- value: 'no'
- - name: startmode
- value: always
- - name: timeout-action
- value: 'flush,reboot'
- - name: watchdog-timeout
- value: 30
- # Best practice for setting SBD timeouts:
- # watchdog-timeout * 2 = msgwait-timeout (set automatically)
- # msgwait-timeout * 1.2 = stonith-timeout
- ha_cluster_cluster_properties:
- - attrs:
- - name: stonith-timeout
- value: 72
- ha_cluster_resource_primitives:
- - id: fence_sbd
- agent: 'stonith:fence_sbd'
- instance_attrs:
- - attrs:
- # taken from host_vars
- - name: devices
- value: "{{ ha_cluster.sbd_devices | join(',') }}"
- - name: pcmk_delay_base
- value: 30
-
- roles:
- - linux-system-roles.ha_cluster
ha_cluster_node_options variable
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_cluster_properties:
- - attrs:
- - name: stonith-enabled
- value: 'true'
- - name: no-quorum-policy
- value: stop
-
- roles:
- - linux-system-roles.ha_cluster
ha_cluster variable
The same result can be achieved by specifying node-specific options in inventory like this:
- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_resource_primitives:
- - id: xvm-fencing
- agent: 'stonith:fence_xvm'
- instance_attrs:
- - attrs:
- - name: pcmk_host_list
- value: node1 node2
- - id: simple-resource
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: resource-with-options
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- instance_attrs:
- - attrs:
- - name: fake
- value: fake-value
- - name: passwd
- value: passwd-value
- meta_attrs:
- - attrs:
- - name: target-role
- value: Started
- - name: is-managed
- value: 'true'
- operations:
- - action: start
- attrs:
- - name: timeout
- value: '30s'
- - action: monitor
- attrs:
- - name: timeout
- value: '5'
- - name: interval
- value: '1min'
- - id: example-1
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-2
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-3
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: simple-clone
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: clone-with-options
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: bundled-resource
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- ha_cluster_resource_groups:
- - id: simple-group
- resource_ids:
- - example-1
- - example-2
- meta_attrs:
- - attrs:
- - name: target-role
- value: Started
- - name: is-managed
- value: 'true'
- - id: cloned-group
- resource_ids:
- - example-3
- ha_cluster_resource_clones:
- - resource_id: simple-clone
- - resource_id: clone-with-options
- promotable: true
- id: custom-clone-id
- meta_attrs:
- - attrs:
- - name: clone-max
- value: '2'
- - name: clone-node-max
- value: '1'
- - resource_id: cloned-group
- promotable: true
- ha_cluster_resource_bundles:
- - id: bundle-with-resource
- resource-id: bundled-resource
- container:
- type: podman
- options:
- - name: image
- value: my:image
- network_options:
- - name: control-port
- value: 3121
- port_map:
- -
- - name: port
- value: 10001
- -
- - name: port
- value: 10002
- - name: internal-port
- value: 10003
- storage_map:
- -
- - name: source-dir
- value: /srv/daemon-data
- - name: target-dir
- value: /var/daemon/data
- -
- - name: source-dir-root
- value: /var/log/pacemaker/bundles
- - name: target-dir
- value: /var/log/daemon
- meta_attrs:
- - attrs:
- - name: target-role
- value: Started
- - name: is-managed
- value: 'true'
-
- roles:
- - linux-system-roles.ha_cluster
all:
+ hosts:
+ node1:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/vdx
+ - /dev/vdy
+ - /dev/vdz
+ node2:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/vdx
+ - /dev/vdy
+ - /dev/vdz
Variables specified in inventory can be omitted when writing the playbook:
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- # Set a different `resource-stickiness` value during and outside work
- # hours. This allows resources to automatically move back to their most
- # preferred hosts, but at a time that (in theory) does not interfere with
- # business activities.
- ha_cluster_resource_defaults:
- meta_attrs:
- - id: core-hours
- rule: date-spec hours=9-16 weekdays=1-5
- score: 2
- attrs:
- - name: resource-stickiness
- value: INFINITY
- - id: after-hours
- score: 1
- attrs:
- - name: resource-stickiness
- value: 0
- # Default the timeout on all 10-second-interval monitor actions on IPaddr2
- # resources to 8 seconds.
- ha_cluster_resource_operation_defaults:
- meta_attrs:
- - rule: resource ::IPaddr2 and op monitor interval=10s
- score: INFINITY
- attrs:
- - name: timeout
- value: 8s
-
- roles:
- - linux-system-roles.ha_cluster
If both the ha_cluster_node_options and ha_cluster variables contain SBD options, those in ha_cluster_node_options have precedence.
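For instance, with both variables defining sbd_devices for node1, the value from ha_cluster_node_options wins (a sketch with placeholder device paths):
ha_cluster:                    # e.g. a host variable in inventory
  sbd_devices:
    - /dev/vdx
ha_cluster_node_options:       # defined in the play
  - node_name: node1
    sbd_devices:
      - /dev/vdy               # node1 ends up using /dev/vdy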
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_resource_primitives:
- - id: apc1
- agent: 'stonith:fence_apc_snmp'
- instance_attrs:
- - attrs:
- - name: ip
- value: apc1.example.com
- - name: username
- value: user
- - name: password
- value: secret
- - name: pcmk_host_map
- value: node1:1;node2:2
- - id: apc2
- agent: 'stonith:fence_apc_snmp'
- instance_attrs:
- - attrs:
- - name: ip
- value: apc2.example.com
- - name: username
- value: user
- - name: password
- value: secret
- - name: pcmk_host_map
- value: node1:1;node2:2
- # Nodes have redundant power supplies, apc1 and apc2. Cluster must ensure
- # that when attempting to reboot a node, both power supplies are turned off
- # before either power supply is turned back on.
- ha_cluster_stonith_levels:
- - level: 1
- target: node1
- resource_ids:
- - apc1
- - apc2
- - level: 1
- target: node2
- resource_ids:
- - apc1
- - apc2
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- # In order to use constraints, we need resources the constraints will apply
- # to.
- ha_cluster_resource_primitives:
- - id: xvm-fencing
- agent: 'stonith:fence_xvm'
- instance_attrs:
- - attrs:
- - name: pcmk_host_list
- value: node1 node2
- - id: example-1
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-2
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-3
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-4
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-5
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-6
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- # location constraints
- ha_cluster_constraints_location:
- # resource ID and node name
- - resource:
- id: example-1
- node: node1
- options:
- - name: score
- value: 20
- # resource pattern and node name
- - resource:
- pattern: example-\d+
- node: node1
- options:
- - name: score
- value: 10
- # resource ID and rule
- - resource:
- id: example-2
- rule: '#uname eq node2 and date in_range 2022-01-01 to 2022-02-28'
- # resource pattern and rule
- - resource:
- pattern: example-\d+
- rule: node-type eq weekend and date-spec weekdays=6-7
- # colocation constraints
- ha_cluster_constraints_colocation:
- # simple constraint
- - resource_leader:
- id: example-3
- resource_follower:
- id: example-4
- options:
- - name: score
- value: -5
- # set constraint
- - resource_sets:
- - resource_ids:
- - example-1
- - example-2
- - resource_ids:
- - example-5
- - example-6
- options:
- - name: sequential
- value: "false"
- options:
- - name: score
- value: 20
- # order constraints
- ha_cluster_constraints_order:
- # simple constraint
- - resource_first:
- id: example-1
- resource_then:
- id: example-6
- options:
- - name: symmetrical
- value: "false"
- # set constraint
- - resource_sets:
- - resource_ids:
- - example-1
- - example-2
- options:
- - name: require-all
- value: "false"
- - name: sequential
- value: "false"
- - resource_ids:
- - example-3
- - resource_ids:
- - example-4
- - example-5
- options:
- - name: sequential
- value: "false"
- # ticket constraints
- ha_cluster_constraints_ticket:
- # simple constraint
- - resource:
- id: example-1
- ticket: ticket1
- options:
- - name: loss-policy
- value: stop
- # set constraint
- - resource_sets:
- - resource_ids:
- - example-3
- - example-4
- - example-5
- ticket: ticket2
- options:
- - name: loss-policy
- value: fence
-
- roles:
- - linux-system-roles.ha_cluster
Before you can add a quorum device to a cluster, you need to set the device up. This only needs to be done once for each quorum device. Once it has been set up, you can use a quorum device in any number of clusters.
Note that you cannot run a quorum device on a cluster node.
ha_cluster_resource_primitives:
  - id: xvm-fencing
    agent: 'stonith:fence_xvm'
    instance_attrs:
      - attrs:
          - name: pcmk_host_list
            value: node1 node2
  - id: simple-resource
    # wokeignore:rule=dummy
    agent: 'ocf:pacemaker:Dummy'
  - id: resource-with-options
    # wokeignore:rule=dummy
    agent: 'ocf:pacemaker:Dummy'
    instance_attrs:
      - attrs:
          - name: fake
            value: fake-value
          - name: passwd
            value: passwd-value
    meta_attrs:
      - attrs:
          - name: target-role
            value: Started
          - name: is-managed
            value: 'true'
    operations:
      - action: start
        attrs:
          - name: timeout
            value: '30s'
      - action: monitor
        attrs:
          - name: timeout
            value: '5'
          - name: interval
            value: '1min'
  - id: example-1
    # wokeignore:rule=dummy
    agent: 'ocf:pacemaker:Dummy'
  - id: example-2
    # wokeignore:rule=dummy
    agent: 'ocf:pacemaker:Dummy'
  - id: example-3
    # wokeignore:rule=dummy
    agent: 'ocf:pacemaker:Dummy'
  - id: simple-clone
    # wokeignore:rule=dummy
    agent: 'ocf:pacemaker:Dummy'
  - id: clone-with-options
    # wokeignore:rule=dummy
    agent: 'ocf:pacemaker:Dummy'
  - id: bundled-resource
    # wokeignore:rule=dummy
    agent: 'ocf:pacemaker:Dummy'
ha_cluster_resource_groups:
  - id: simple-group
    resource_ids:
      - example-1
      - example-2
    meta_attrs:
      - attrs:
          - name: target-role
            value: Started
          - name: is-managed
            value: 'true'
  - id: cloned-group
    resource_ids:
      - example-3
ha_cluster_resource_clones:
  - resource_id: simple-clone
  - resource_id: clone-with-options
    promotable: true
    id: custom-clone-id
    meta_attrs:
      - attrs:
          - name: clone-max
            value: '2'
          - name: clone-node-max
            value: '1'
  - resource_id: cloned-group
    promotable: true
ha_cluster_resource_bundles:
  - id: bundle-with-resource
    resource-id: bundled-resource
    container:
      type: podman
      options:
        - name: image
          value: my:image
    network_options:
      - name: control-port
        value: 3121
    port_map:
      -
        - name: port
          value: 10001
      -
        - name: port
          value: 10002
        - name: internal-port
          value: 10003
    storage_map:
      -
        - name: source-dir
          value: /srv/daemon-data
        - name: target-dir
          value: /var/daemon/data
      -
        - name: source-dir-root
          value: /var/log/pacemaker/bundles
        - name: target-dir
          value: /var/log/daemon
    meta_attrs:
      - attrs:
          - name: target-role
            value: Started
          - name: is-managed
            value: 'true'

roles:
  - linux-system-roles.ha_cluster
- hosts: nodeQ
- hosts: node1 node2
vars:
- ha_cluster_cluster_present: false
+ ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_qnetd:
- present: true
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_quorum:
- device:
- model: net
- model_options:
- - name: host
- value: nodeQ
- - name: algorithm
- value: lms
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_node_options:
- - node_name: node1
- attributes:
- - attrs:
- - name: attribute1
- value: value1A
- - name: attribute2
- value: value2A
- - node_name: node2
- attributes:
- - attrs:
- - name: attribute1
- value: value1B
- - name: attribute2
- value: value2B
-
- roles:
- - linux-system-roles.ha_cluster
Before you can add a quorum device to a cluster, you need to set the device up. This only needs to be done once for each quorum device. Once it has been set up, you can use a quorum device in any number of clusters.
Note that you cannot run a quorum device on a cluster node.
- hosts: node1 node2
- hosts: nodeQ
vars:
ha_cluster_cluster_present: false
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ ha_cluster_quorum:
+ device:
+ model: net
+ model_options:
+ - name: host
+ value: nodeQ
+ - name: algorithm
+ value: lms
+
+ roles:
+ - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ ha_cluster_node_options:
+ - node_name: node1
+ attributes:
+ - attrs:
+ - name: attribute1
+ value: value1A
+ - name: attribute2
+ value: value2A
+ - node_name: node2
+ attributes:
+ - attrs:
+ - name: attribute1
+ value: value1B
+ - name: attribute2
+ value: value2B
+
+ roles:
+ - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ # To use an ACL role permission reference, the reference must exist in CIB.
+ ha_cluster_resource_primitives:
+ - id: not-for-operator
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ # ACLs must be enabled (using the enable-acl cluster property) in order to
+ # be effective.
+ ha_cluster_cluster_properties:
+ - attrs:
+ - name: enable-acl
+ value: 'true'
+ ha_cluster_acls:
+ acl_roles:
+ - id: operator
+ description: HA cluster operator
+ permissions:
+ - kind: write
+ xpath: //crm_config//nvpair[@name='maintenance-mode']
+ - kind: deny
+ reference: not-for-operator
+ - id: administrator
+ permissions:
+ - kind: write
+ xpath: /cib
+ acl_users:
+ - id: alice
+ roles:
+ - operator
+ - administrator
+ - id: bob
+ roles:
+ - administrator
+ acl_groups:
+ - id: admins
+ roles:
+ - administrator
+
+ roles:
+ - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_present: false
+
+ roles:
+ - linux-system-roles.ha_cluster
See README-ostree.md