authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 06:48:59 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 06:48:59 +0000
commitd835b2cae8abc71958b69362162e6a70c3d7ef63 (patch)
tree81052e3d2ce3e1bcda085f73d925e9d6257dec15 /scripts
parentInitial commit. (diff)
Adding upstream version 4.6.0. (tag: upstream/4.6.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'scripts')
-rw-r--r--  scripts/apache/main.yml                  |  69
-rwxr-xr-x  scripts/check-uptime/fetch.py            |   7
-rw-r--r--  scripts/check-uptime/main.yml            |  19
-rwxr-xr-x  scripts/check-uptime/report.py           |  11
-rw-r--r--  scripts/clvm-vg/main.yml                 |  74
-rw-r--r--  scripts/clvm/main.yml                    |  39
-rw-r--r--  scripts/cryptctl/README.md               |  56
-rw-r--r--  scripts/cryptctl/main.yml                |  70
-rw-r--r--  scripts/database/main.yml                |  34
-rw-r--r--  scripts/db2-hadr/main.yml                |  43
-rw-r--r--  scripts/db2/main.yml                     |  45
-rw-r--r--  scripts/drbd/main.yml                    |  41
-rw-r--r--  scripts/exportfs/main.yml                |  37
-rw-r--r--  scripts/filesystem/main.yml              |  30
-rw-r--r--  scripts/gfs2-base/main.yml               |  27
-rw-r--r--  scripts/gfs2/main.yml                    |  62
-rw-r--r--  scripts/haproxy/haproxy.cfg              |  13
-rw-r--r--  scripts/haproxy/main.yml                 |  37
-rwxr-xr-x  scripts/health/collect.py                | 111
-rwxr-xr-x  scripts/health/hahealth.py               |  40
-rw-r--r--  scripts/health/main.yml                  |  16
-rwxr-xr-x  scripts/health/report.py                 | 134
-rw-r--r--  scripts/libvirt/main.yml                 |  66
-rw-r--r--  scripts/lvm-drbd/main.yml                |  62
-rw-r--r--  scripts/lvm/main.yml                     |  21
-rw-r--r--  scripts/mailto/main.yml                  |  29
-rw-r--r--  scripts/nfsserver-lvm-drbd/main.yml      | 137
-rw-r--r--  scripts/nfsserver/main.yml               |  74
-rw-r--r--  scripts/nginx/main.yml                   |  63
-rw-r--r--  scripts/ocfs2/main.yml                   |  76
-rw-r--r--  scripts/oracle/main.yml                  |  51
-rw-r--r--  scripts/raid-lvm/main.yml                |  25
-rw-r--r--  scripts/raid1/main.yml                   |  17
-rw-r--r--  scripts/sap-as/main.yml                  |  70
-rw-r--r--  scripts/sap-ci/main.yml                  |  70
-rw-r--r--  scripts/sap-db/main.yml                  |  63
-rw-r--r--  scripts/sap-simple-stack-plus/main.yml   | 220
-rw-r--r--  scripts/sap-simple-stack/main.yml        | 183
-rw-r--r--  scripts/sapdb/main.yml                   |  32
-rw-r--r--  scripts/sapinstance/main.yml             |  48
-rw-r--r--  scripts/sbd-device/main.yml              |  63
-rw-r--r--  scripts/sbd/main.yml                     |  37
-rw-r--r--  scripts/virtual-ip/main.yml              |  24
-rw-r--r--  scripts/vmware/main.yml                  |  60
44 files changed, 2506 insertions, 0 deletions
diff --git a/scripts/apache/main.yml b/scripts/apache/main.yml
new file mode 100644
index 0000000..9af548d
--- /dev/null
+++ b/scripts/apache/main.yml
@@ -0,0 +1,69 @@
+# Copyright (C) 2009 Dejan Muhamedagic
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Server
+shortdesc: Apache Webserver
+longdesc: |
+ Configure a resource group containing a virtual IP address and
+ an instance of the Apache web server.
+
+ You can optionally configure a file system resource which will be
+ mounted before the web server is started.
+
+ You can also optionally configure a database resource which will
+ be started before the web server but after mounting the optional
+ file system.
+include:
+ - agent: ocf:heartbeat:apache
+ name: apache
+ longdesc: |
+ The Apache configuration file specified here must be available via the
+ same path on all cluster nodes, and Apache must be configured with
+ mod_status enabled. If in doubt, try running Apache manually via
+ its init script first, and ensure http://localhost:80/server-status is
+ accessible.
+ ops: |
+ op start timeout="40"
+ op stop timeout="60"
+ op monitor interval="10" timeout="20"
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the Apache instance.
+ parameters:
+ - name: id
+ value: "{{id}}-vip"
+ - script: filesystem
+ shortdesc: Optional file system mounted before the web server is started.
+ required: false
+ - script: database
+ shortdesc: Optional database started before the web server is started.
+ required: false
+parameters:
+ - name: install
+ type: boolean
+ shortdesc: Install and configure apache
+ value: false
+actions:
+ - install:
+ - apache2
+ shortdesc: Install the apache package
+ when: install
+ - service:
+ - apache: disable
+ shortdesc: Let cluster manage apache
+ when: install
+ - call: a2enmod status; true
+ shortdesc: Enable status module
+ when: install
+ sudo: true
+ - include: filesystem
+ - include: database
+ - include: virtual-ip
+ - include: apache
+ - cib: |
+ group g-{{id}}
+ {{filesystem:id}}
+ {{database:id}}
+ {{virtual-ip:id}}
+ {{id}}
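+
+# A possible invocation sketch from the crm shell (the IP address is a
+# placeholder; parameters of included scripts are prefixed with the include
+# name, as in "virtual-ip:ip=", following the convention shown in the
+# cryptctl README in this directory):
+#
+#   crm script verify apache install=true virtual-ip:ip=192.168.1.100
+#   crm script run apache install=true virtual-ip:ip=192.168.1.100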
diff --git a/scripts/check-uptime/fetch.py b/scripts/check-uptime/fetch.py
new file mode 100755
index 0000000..c1cceff
--- /dev/null
+++ b/scripts/check-uptime/fetch.py
@@ -0,0 +1,7 @@
+#!/usr/bin/python3
+import crm_script
+try:
+ uptime = open('/proc/uptime').read().split()[0]
+ crm_script.exit_ok(uptime)
+except Exception as e:
+ crm_script.exit_fail("Couldn't open /proc/uptime: %s" % (e))
diff --git a/scripts/check-uptime/main.yml b/scripts/check-uptime/main.yml
new file mode 100644
index 0000000..d37f712
--- /dev/null
+++ b/scripts/check-uptime/main.yml
@@ -0,0 +1,19 @@
+version: 2.2
+category: Script
+shortdesc: Check uptime of nodes
+longdesc: >
+ Fetches the uptime of all nodes and reports which
+ node has lived longest.
+
+parameters:
+ - name: show_all
+ shortdesc: Show all uptimes
+ type: boolean
+ value: false
+
+actions:
+ - shortdesc: Fetch uptimes
+ collect: fetch.py
+
+ - shortdesc: Report uptime
+ report: report.py
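+
+# Usage sketch: run the wizard from the crm shell, optionally listing the
+# uptime of every node:
+#
+#   crm script run check-uptime show_all=true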
diff --git a/scripts/check-uptime/report.py b/scripts/check-uptime/report.py
new file mode 100755
index 0000000..81710c8
--- /dev/null
+++ b/scripts/check-uptime/report.py
@@ -0,0 +1,11 @@
+#!/usr/bin/python3
+import crm_script
+show_all = crm_script.is_true(crm_script.param('show_all'))
+uptimes = list(crm_script.output(1).items())
+max_uptime = '', 0.0
+for host, uptime in uptimes:
+ if float(uptime) > max_uptime[1]:
+ max_uptime = host, float(uptime)
+if show_all:
+ print("Uptimes: %s" % (', '.join("%s: %s" % v for v in uptimes)))
+print("Longest uptime is %s seconds on host %s" % (max_uptime[1], max_uptime[0]))
diff --git a/scripts/clvm-vg/main.yml b/scripts/clvm-vg/main.yml
new file mode 100644
index 0000000..846c70b
--- /dev/null
+++ b/scripts/clvm-vg/main.yml
@@ -0,0 +1,74 @@
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: File System
+shortdesc: Cluster-aware LVM (auto activation)
+longdesc: |
+  Configures a resource to manage the activation of a volume
+  group. Before adding this resource, proceed as below to create
+  the VG and LVs. Refer to the lvmlockd(8) manpage for more
+  detailed steps.
+
+  - Create the VG on shared devices:
+    vgcreate --shared <vgname> <devices>
+
+  - Create an LV:
+    lvcreate -an -L <size> -n <lvname> <vgname>
+
+  For LVs in a shared VG, there are two activation modes: "exclusive"
+  and "shared". With the "exclusive" mode (the default), an LV
+  activated on one host cannot be activated on another. With the
+  "shared" mode, an LV can be activated concurrently on multiple
+  hosts, so that cluster file systems such as OCFS2 can use it.
+
+ If the resource is created with activation_mode="shared", it will
+ be added to the cLVM group resource. The cLVM group resource is
+ assumed to be named g-clvm. This is the name of the resource created
+ by the clvm wizard.
+
+parameters:
+ - name: id
+ shortdesc: Volume group instance ID
+ longdesc: Unique ID for the volume group instance in the cluster.
+ required: true
+ unique: true
+ type: resource
+ value: vg1
+
+ - name: vgname
+ shortdesc: Volume Group Name
+ longdesc: LVM volume group name.
+ required: true
+ type: string
+ value: vg1
+
+ - name: activation_mode
+ shortdesc: LVM activation mode
+ longdesc: |
+ How a VG/LV is activated in cluster, either "exclusive" (default) or "shared".
+ It depends on the filesystem you need to create on the LV to choose the
+ activation mode. For local filesystem like ext4, you need "exclusive" activation.
+ For cluster filesystem like OCFS2, you need "shared" activation.
+ required: false
+ type: string
+ value: exclusive
+
+ - name: clvm-group
+ shortdesc: cLVM Resource Group ID
+ longdesc: ID of the cLVM resource group.
+ type: resource
+ required: false
+ value: g-clvm
+
+actions:
+ - cib: |
+ primitive {{id}} ocf:heartbeat:LVM-activate
+ params vgname="{{vgname}}" vg_access_mode="lvmlockd" activation_mode="{{activation_mode}}"
+ op start timeout=60s
+ op stop timeout=60s
+ op monitor interval=30s timeout=90s
+
+ - crm: configure modgroup {{clvm-group}} add {{id}}
+ shortdesc: Add volume group to the cLVM group resource
+ when: activation_mode == "shared"
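+
+# Preparation and invocation sketch (device path and LV size are placeholders;
+# the vgcreate/lvcreate steps are the ones described in the longdesc above, and
+# the "shared" mode assumes the clvm wizard has already created the g-clvm group):
+#
+#   vgcreate --shared vg1 /dev/disk/by-id/<device>
+#   lvcreate -an -L 10G -n lv1 vg1
+#   crm script run clvm-vg vgname=vg1 activation_mode=shared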
diff --git a/scripts/clvm/main.yml b/scripts/clvm/main.yml
new file mode 100644
index 0000000..8ecae60
--- /dev/null
+++ b/scripts/clvm/main.yml
@@ -0,0 +1,39 @@
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: File System
+shortdesc: Cluster-aware LVM (lvmlockd)
+longdesc: |
+ Configure a cloned lvmlockd resource.
+
+ NB: Only one lvmlockd resource is necessary, regardless
+ of how many volume groups are managed as resources. To
+ monitor volume groups after configuring lvmlockd, the wizard
+  for activating volume groups can be used. Refer to the
+  lvmlockd(8) manpage for more information.
+
+parameters:
+ - name: install
+ type: boolean
+ shortdesc: Install packages for lvmlockd
+ value: false
+
+actions:
+ - install:
+ - lvm2-lockd
+ shortdesc: Install the lvm2-lockd package
+ when: install
+ - cib: |
+ primitive dlm ocf:pacemaker:controld
+ op start timeout=90s
+ op stop timeout=100s
+
+ primitive lvmlockd ocf:heartbeat:lvmlockd
+ op start timeout=90s
+ op stop timeout=100s
+
+ group g-clvm dlm lvmlockd
+
+ clone c-clvm g-clvm
+ meta interleave=true ordered=true
diff --git a/scripts/cryptctl/README.md b/scripts/cryptctl/README.md
new file mode 100644
index 0000000..13322c8
--- /dev/null
+++ b/scripts/cryptctl/README.md
@@ -0,0 +1,56 @@
+# cryptctl
+
+## Introduction
+
+The cryptctl server daemon provides LUKS-based disk encryption. This script sets up an HA environment for the cryptctl server.
+
+## Prerequisites
+
+The cryptctl server needs the following resources:
+
+* /etc/sysconfig/cryptctl-server: The configuration of the server. It is created when the server is set up and is only modified when the configuration changes, for example when the administrator password is changed. It is sufficient to copy this file to all nodes when the cluster is created.
+* The server certificate files in the directory /etc/cryptctl/servertls/. The content of this directory does not change as long as the certificates are valid. It is sufficient to copy these files to all nodes when the cluster is created.
+* /var/lib/cryptctl/keydb: The content of this directory must be provided on shared storage such as a SAN, a NAS, or an NFS server. The encryption keys are saved here. A Filesystem resource agent is created for this directory.
+* An IP address for the cryptctl server to listen on. An IPAddr2 resource agent is created for it.
+
+## Setup
+
+### Set up the cryptctl server
+As a first step, set up the cryptctl server:
+```shell
+cryptctl init-server
+```
+
+### Create a basic cluster
+If not already done, set up a basic cluster with at least two nodes. It is very important that Node1 is the server where you have configured the cryptctl server.
+
+```shell
+crm cluster init -i <NetDev> -A <AdminIP> -n <ClusterName> -y
+```
+
+Join the cluster from other nodes:
+```shell
+ssh <Node2>
+crm cluster join -y <Node1>
+```
+
+### Set up the resource group for the cryptctl server
+
+You can set up all needed resource agents and copy all files to all nodes with the cryptctl crm shell script in one step. It is strongly recommended to verify the setup first:
+
+```shell
+crm script verify cryptctl \
+ cert-path=</etc/cryptctl/servertls/certificate-name> \
+ cert-key-path=</etc/cryptctl/servertls/certificate-key-name> \
+ virtual-ip:ip=<IP-Address> \
+ filesystem:device=<Path to the device>
+```
+
+If the check was successful, set up the cluster group by running the script:
+```shell
+crm script run cryptctl \
+ cert-path=</etc/cryptctl/servertls/certificate-name> \
+ cert-key-path=</etc/cryptctl/servertls/certificate-key-name> \
+ virtual-ip:ip=<IP-Address> \
+ filesystem:device=<Path to the device>
+```
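+
+### Check the result
+
+After the script has run, the new resources should be visible in the cluster. Assuming the default `id` of `cryptctl`, the group is named `group-cryptctl`:
+
+```shell
+crm status
+crm configure show group-cryptctl
+```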
diff --git a/scripts/cryptctl/main.yml b/scripts/cryptctl/main.yml
new file mode 100644
index 0000000..eabf88c
--- /dev/null
+++ b/scripts/cryptctl/main.yml
@@ -0,0 +1,70 @@
+# Copyright (C) 2022 Peter Varkoly
+# License: GNU General Public License (GPL)
+version: 2.2
+category: System Management
+shortdesc: A utility for setting up LUKS-based disk encryption
+longdesc: |
+ Configure a resource group containing a virtual IP address,
+ a filesystem resource containing the disk encryption keys and records,
+ and a systemd instance of the cryptctl server.
+
+ Furthermore a resource group will be created to bind all resources on the same node.
+parameters:
+ - name: id
+ shortdesc: ID of the resource group
+ value: cryptctl
+ - name: cert-path
+ shortdesc: The path to the created certificate
+ required: true
+ - name: cert-key-path
+ shortdesc: The path to the created certificate key
+ required: true
+
+include:
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the cryptctl instance.
+ required: true
+ parameters:
+ - name: id
+ value: "{{id}}-vip"
+ - script: filesystem
+ shortdesc: Filesystem resource containing the disk encryption keys and records
+ required: true
+ parameters:
+ - name: id
+ value: "{{id}}-filesystem"
+ - name: directory
+ value: "/var/lib/cryptctl/keydb"
+ - agent: systemd:cryptctl-server
+ name: cryptctl-server
+ parameters:
+ - name: id
+ value: cryptctl-server-service
+ ops: |
+ op monitor interval=10s
+
+actions:
+ - service: "cryptctl-server:disable"
+ nodes: all
+ shortdesc: "Disable cryptctl-server service on all nodes."
+ - copy: "/etc/sysconfig/cryptctl-server"
+ to: "/etc/sysconfig/cryptctl-server"
+ nodes: all
+ shortdesc: "Copy the configuration to all nodes"
+ - copy: "{{cert-path}}"
+ to: "{{cert-path}}"
+ nodes: all
+ shortdesc: "Copy the certificat file to all nodes"
+ - copy: "{{cert-key-path}}"
+ to: "{{cert-key-path}}"
+ nodes: all
+ shortdesc: "Copy the certificat key file to all nodes"
+ - include: virtual-ip
+ - include: filesystem
+ - include: cryptctl-server
+ - cib: |
+ group group-{{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{cryptctl-server:id}}
+
diff --git a/scripts/database/main.yml b/scripts/database/main.yml
new file mode 100644
index 0000000..749ede7
--- /dev/null
+++ b/scripts/database/main.yml
@@ -0,0 +1,34 @@
+version: 2.2
+category: Database
+shortdesc: MySQL/MariaDB Database
+longdesc: >
+ Configure a MySQL or MariaDB SQL Database.
+ Enable the install option to install the necessary
+ packages for the database.
+include:
+ - agent: ocf:heartbeat:mysql
+ name: database
+ parameters:
+ - name: test_table
+ value: ""
+ ops: |
+ op start timeout=120s
+ op stop timeout=120s
+ op monitor interval=20s timeout=30s
+
+parameters:
+ - name: install
+ shortdesc: Enable to install required packages
+ type: boolean
+ value: false
+
+actions:
+ - install: mariadb
+ shortdesc: Install packages
+ when: install
+ - service:
+ - name: mysql
+ action: disable
+ shortdesc: Let cluster manage the database
+ when: install
+ - include: database
diff --git a/scripts/db2-hadr/main.yml b/scripts/db2-hadr/main.yml
new file mode 100644
index 0000000..9179b70
--- /dev/null
+++ b/scripts/db2-hadr/main.yml
@@ -0,0 +1,43 @@
+version: 2.2
+category: Database
+shortdesc: IBM DB2 Database with HADR
+longdesc: >-
+ Configure an IBM DB2 database resource as active/passive HADR,
+ along with a Virtual IP.
+
+include:
+ - agent: ocf:heartbeat:db2
+ parameters:
+ - name: id
+ required: true
+ shortdesc: DB2 Resource ID
+ longdesc: Unique ID for the database resource in the cluster.
+ type: string
+ value: db2-database
+ - name: instance
+ required: true
+ type: string
+ value: db2inst1
+ - name: dblist
+ value: db1
+ ops: |
+ op start interval="0" timeout="130"
+ op stop interval="0" timeout="120"
+ op promote interval="0" timeout="120"
+ op demote interval="0" timeout="120"
+ op monitor interval="30" timeout="60"
+ op monitor interval="45" role="Master" timeout="60"
+
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the DB2 instance.
+ parameters:
+ - name: id
+ value: db2-virtual-ip
+actions:
+ - include: virtual-ip
+ - include: db2
+ - cib: |
+ ms ms-{{db2:id}} {{db2:id}}
+ meta target-role=Stopped notify=true
+ colocation {{virtual-ip:id}}-with-master inf: {{virtual-ip:id}}:Started ms-{{db2:id}}:Master
+ order {{virtual-ip:id}}-after-master Mandatory: ms-{{db2:id}}:promote {{virtual-ip:id}}:start
diff --git a/scripts/db2/main.yml b/scripts/db2/main.yml
new file mode 100644
index 0000000..95e7461
--- /dev/null
+++ b/scripts/db2/main.yml
@@ -0,0 +1,45 @@
+version: 2.2
+category: Database
+shortdesc: IBM DB2 Database
+longdesc: >-
+ Configure an IBM DB2 database resource, along with a Virtual IP and a file system mount point.
+
+ Note that the file system resource will be stopped initially, in case you need to run mkfs.
+
+include:
+ - agent: ocf:heartbeat:db2
+ parameters:
+ - name: id
+ required: true
+ shortdesc: DB2 Resource ID
+ longdesc: Unique ID for the database resource in the cluster.
+ type: string
+ value: db2-database
+ - name: instance
+ required: true
+ type: string
+ value: db2inst1
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the DB2 instance.
+ parameters:
+ - name: id
+ value: db2-virtual-ip
+ - script: filesystem
+ shortdesc: The file system configured here will be mounted before the DB2 instance.
+ parameters:
+ - name: id
+ value: db2-fs
+ - name: fstype
+ value: xfs
+ - name: directory
+ value: "/db2/db2inst1"
+actions:
+ - include: virtual-ip
+ - include: filesystem
+ - include: db2
+ - cib: |
+ group g-{{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{id}}
+ meta target-role=Stopped
diff --git a/scripts/drbd/main.yml b/scripts/drbd/main.yml
new file mode 100644
index 0000000..29ba472
--- /dev/null
+++ b/scripts/drbd/main.yml
@@ -0,0 +1,41 @@
+version: 2.2
+category: File System
+shortdesc: DRBD Block Device
+longdesc: >-
+ Distributed Replicated Block Device. Configure a DRBD cluster resource.
+
+ Also creates a multistate resource managing the state of DRBD.
+
+ Does not create or modify the referenced DRBD configuration.
+
+parameters:
+ - name: id
+ shortdesc: DRBD Cluster Resource ID
+ required: true
+ value: drbd-data
+ type: resource
+ - name: drbd_resource
+ shortdesc: DRBD Resource Name
+ required: true
+ value: drbd0
+ type: string
+ - name: drbdconf
+ value: "/etc/drbd.conf"
+ - name: install
+ type: boolean
+ shortdesc: Install packages for DRBD
+ value: false
+
+actions:
+ - install: drbd drbd-kmp-default
+ shortdesc: Install packages for DRBD
+ when: install
+ - cib: |
+ primitive {{id}} ocf:linbit:drbd
+ params
+ drbd_resource="{{drbd_resource}}"
+ drbdconf="{{drbdconf}}"
+ op monitor interval="29s" role="Master"
+ op monitor interval="31s" role="Slave"
+ ms ms-{{id}} {{id}}
+ meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
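+
+# Invocation sketch using the defaults shown above (the DRBD resource "drbd0"
+# must already be defined in the DRBD configuration; this wizard does not
+# create or modify it):
+#
+#   crm script run drbd id=drbd-data drbd_resource=drbd0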
diff --git a/scripts/exportfs/main.yml b/scripts/exportfs/main.yml
new file mode 100644
index 0000000..6dff8f2
--- /dev/null
+++ b/scripts/exportfs/main.yml
@@ -0,0 +1,37 @@
+version: 2.2
+shortdesc: "NFS Exported File System"
+category: NFS
+include:
+ - agent: ocf:heartbeat:exportfs
+ parameters:
+ - name: id
+ required: true
+ shortdesc: Resource ID
+ longdesc: Cluster Resource ID
+ type: resource
+ value: exportfs
+ - name: fsid
+ shortdesc: Unique FSID Within Cluster or Starting FSID for Multiple Exports
+ required: true
+ type: integer
+ value: 1
+ - name: directory
+ required: true
+ type: string
+ shortdesc: Mount Point (Directory)
+ longdesc: "The mount point for the file system, e.g.: /srv/nfs/home"
+ - name: options
+ required: true
+ shortdesc: Mount Options
+ longdesc: "Any additional options to be given to the mount command, for example rw,mountpoint"
+ type: string
+ - name: wait_for_leasetime_on_stop
+ required: false
+ shortdesc: Wait for Lease Time on Stop
+ longdesc: If set to true, wait for lease on stop.
+ type: boolean
+ value: true
+ ops: |
+ op monitor interval=30s
+actions:
+ - include: exportfs
diff --git a/scripts/filesystem/main.yml b/scripts/filesystem/main.yml
new file mode 100644
index 0000000..b37cf15
--- /dev/null
+++ b/scripts/filesystem/main.yml
@@ -0,0 +1,30 @@
+version: 2.2
+category: File System
+shortdesc: File System (mount point)
+include:
+ - agent: ocf:heartbeat:Filesystem
+ name: filesystem
+ parameters:
+ - name: id
+ required: true
+ type: resource
+ - name: device
+ required: true
+ type: string
+ - name: directory
+ required: true
+ type: string
+ - name: fstype
+ required: true
+ type: string
+ - name: options
+ required: false
+ type: string
+ ops: |
+ meta target-role=Stopped
+ op start timeout=60s
+ op stop timeout=60s
+ op monitor interval=20s timeout=40s
+
+actions:
+ - include: filesystem
diff --git a/scripts/gfs2-base/main.yml b/scripts/gfs2-base/main.yml
new file mode 100644
index 0000000..47afe0b
--- /dev/null
+++ b/scripts/gfs2-base/main.yml
@@ -0,0 +1,27 @@
+# Copyright (C) 2009 Andrew Beekhof
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Script
+shortdesc: GFS2 File System Base (Cloned)
+longdesc: |
+ This template generates a cloned instance of the GFS2 file system.
+ The file system should be on the device, unless cLVM is used.
+
+parameters:
+ - name: clvm-group
+ shortdesc: cLVM Resource Group ID
+ longdesc: Optional ID of a cLVM resource group.
+ required: False
+
+actions:
+ - cib: |
+ primitive gfs-controld ocf:pacemaker:controld
+
+ clone c-gfs gfs-controld
+ meta interleave=true ordered=true
+
+ - crm: configure modgroup {{clvm-group}} add c-gfs
+ shortdesc: Add gfs controld to cLVM group
+ when: clvm-group
diff --git a/scripts/gfs2/main.yml b/scripts/gfs2/main.yml
new file mode 100644
index 0000000..673cd06
--- /dev/null
+++ b/scripts/gfs2/main.yml
@@ -0,0 +1,62 @@
+# Copyright (C) 2009 Andrew Beekhof
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+shortdesc: GFS2 File System (Cloned)
+longdesc: >-
+ This template generates a cloned instance of the GFS2 file system.
+ The file system should be on the device, unless cLVM is used.
+
+category: File System
+parameters:
+ - name: id
+ shortdesc: File System Resource ID
+ longdesc: "NB: The clone is going to be named c-<id> (e.g. c-bigfs)"
+ example: bigfs
+ required: true
+ type: resource
+ - name: directory
+ shortdesc: Mount Point
+ example: /mnt/bigfs
+ required: true
+ type: string
+ - name: device
+ shortdesc: Device
+ required: true
+ type: string
+ - name: options
+ shortdesc: Mount Options
+ type: string
+ required: false
+ - name: dlm
+ shortdesc: Create DLM Resource and Cloned Group
+ longdesc: If set, create the DLM resource and cloned resource group.
+ type: boolean
+ default: true
+ - name: group
+ shortdesc: Cloned Group Resource ID
+ longdesc: ID of cloned group
+ required: false
+ type: resource
+ default: g-dlm
+actions:
+ - when: dlm
+ cib: |
+ primitive dlm ocf:pacemaker:controld
+ op start timeout=90
+ op stop timeout=60
+ group {{group}} dlm
+ clone c-dlm {{group}} meta interleave=true
+ - cib: |
+ primitive {{id}} ocf:heartbeat:Filesystem
+ directory="{{directory}}"
+ fstype="gfs2"
+ device="{{device}}"
+ {{#options}}options="{{options}}"{{/options}}
+ op start timeout=60s
+ op stop timeout=60s
+ op monitor interval=20s timeout=40s
+
+ - crm: configure modgroup {{group}} add {{id}}
+ shortdesc: Add the GFS2 File System to the Cloned Group
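+
+# Invocation sketch using the example values from the parameters above
+# (the device path is a placeholder):
+#
+#   crm script run gfs2 id=bigfs directory=/mnt/bigfs device=/dev/vdb1 dlm=true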
diff --git a/scripts/haproxy/haproxy.cfg b/scripts/haproxy/haproxy.cfg
new file mode 100644
index 0000000..50141a2
--- /dev/null
+++ b/scripts/haproxy/haproxy.cfg
@@ -0,0 +1,13 @@
+global
+ maxconn 256
+ daemon
+
+defaults
+ mode http
+ timeout connect 5000ms
+ timeout client 50000ms
+ timeout server 50000ms
+
+listen http-in
+ bind 0.0.0.0:80
+ stats enable
diff --git a/scripts/haproxy/main.yml b/scripts/haproxy/main.yml
new file mode 100644
index 0000000..3e784c6
--- /dev/null
+++ b/scripts/haproxy/main.yml
@@ -0,0 +1,37 @@
+version: 2.2
+category: Server
+shortdesc: HAProxy
+longdesc: |
+ HAProxy is a free, very fast and reliable solution offering
+ high availability, load balancing, and proxying for TCP and
+ HTTP-based applications. It is particularly suited for very
+ high traffic web sites and powers quite a number of the
+ world's most visited ones.
+
+ NOTE: Installs a basic haproxy.cfg configuration file.
+ This will overwrite any existing haproxy.cfg.
+
+include:
+ - agent: systemd:haproxy
+ name: haproxy
+ ops: |
+ op monitor interval=10s
+
+parameters:
+ - name: install
+ type: boolean
+ value: false
+ shortdesc: Install and configure HAProxy packages
+
+actions:
+ - install: haproxy
+ nodes: all
+ when: install
+ - service: "haproxy:disable"
+ nodes: all
+ when: install
+ - copy: haproxy.cfg
+ to: /etc/haproxy/haproxy.cfg
+ nodes: all
+ when: install
+ - include: haproxy
diff --git a/scripts/health/collect.py b/scripts/health/collect.py
new file mode 100755
index 0000000..180b866
--- /dev/null
+++ b/scripts/health/collect.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python3
+from __future__ import unicode_literals
+from builtins import str
+import os
+import pwd
+import hashlib
+import platform
+import crm_script
+
+import crmsh.log
+crmsh.log.setup_logging()
+from crmsh.report import utils
+
+data = crm_script.get_input()
+
+PACKAGES = ['booth', 'cluster-glue', 'corosync', 'crmsh', 'csync2', 'drbd',
+ 'fence-agents', 'gfs2', 'gfs2-utils', 'ha-cluster-bootstrap',
+ 'haproxy', 'hawk', 'libdlm', 'libqb', 'ocfs2', 'ocfs2-tools',
+ 'pacemaker', 'pacemaker-mgmt', 'resource-agents', 'sbd']
+
+
+def rpm_info():
+ return crm_script.rpmcheck(PACKAGES)
+
+
+def logrotate_info():
+ return {}
+
+
+def get_user():
+ return pwd.getpwuid(os.getuid()).pw_name
+
+
+def sys_info():
+    system, node, release, version, machine, processor = platform.uname()
+    distname = utils.get_distro_info()
+    hostname = os.uname()[1]
+
+    uptime = open('/proc/uptime').read().split()
+    # /proc/loadavg: the first three fields are the 1, 5 and 15 minute load
+    # averages, the fourth is running/total processes, and the last is the
+    # most recently allocated PID. Only the 15 minute average is reported.
+    loadavg = open('/proc/loadavg').read().split()
+
+ return {'system': system,
+ 'node': node,
+ 'release': release,
+ 'version': version,
+ 'machine': machine,
+ 'processor': processor,
+ 'distname': distname,
+ 'user': get_user(),
+ 'hostname': hostname,
+ 'uptime': uptime[0],
+ 'idletime': uptime[1],
+ 'loadavg': loadavg[2] # 15 minute average
+ }
+
+
+def disk_info():
+ rc, out, err = crm_script.call(['df'], shell=False)
+ if rc == 0:
+ disk_use = []
+ for line in out.split('\n')[1:]:
+ line = line.strip()
+ if line:
+ data = line.split()
+ if len(data) >= 6:
+ disk_use.append((data[5], data[4]))
+ return disk_use
+ return []
+
+
+# configurations out of sync
+
+FILES = [
+ '/etc/csync2/key_hagroup',
+ '/etc/csync2/csync2.cfg',
+ '/etc/corosync/corosync.conf',
+ '/etc/sysconfig/sbd',
+ '/etc/sysconfig/SuSEfirewall2',
+ '/etc/sysconfig/SuSEfirewall2.d/services/cluster'
+ ]
+
+
+def files_info():
+ ret = {}
+ for f in FILES:
+ if os.path.isfile(f):
+ try:
+ ret[f] = hashlib.sha1(open(f).read().encode('utf-8')).hexdigest()
+ except IOError as e:
+ ret[f] = "error: %s" % (e)
+ else:
+ ret[f] = ""
+ return ret
+
+
+try:
+ data = {
+ 'rpm': rpm_info(),
+ 'logrotate': logrotate_info(),
+ 'system': sys_info(),
+ 'disk': disk_info(),
+ 'files': files_info()
+ }
+ crm_script.exit_ok(data)
+except Exception as e:
+ crm_script.exit_fail(str(e))
diff --git a/scripts/health/hahealth.py b/scripts/health/hahealth.py
new file mode 100755
index 0000000..f46aec6
--- /dev/null
+++ b/scripts/health/hahealth.py
@@ -0,0 +1,40 @@
+#!/usr/bin/python3
+import os
+import crm_script as crm
+
+
+if not os.path.isfile('/usr/sbin/crm') and not os.path.isfile('/usr/bin/crm'):
+ # crm not installed
+ crm.exit_ok({'status': 'crm not installed'})
+
+
+def get_from_date():
+ rc, out, err = crm.call("date '+%F %H:%M' --date='1 day ago'", shell=True)
+ return out.strip()
+
+
+def create_report():
+ cmd = ['crm', 'report',
+ '-f', get_from_date(),
+ '-D', '-Z', 'health-report']
+ rc, out, err = crm.call(cmd, shell=False)
+ return rc == 0
+
+
+if not create_report():
+ crm.exit_ok({'status': 'Failed to create report'})
+
+
+def extract_report():
+ rc, out, err = crm.call(['tar', 'xjf', 'health-report.tar.bz2'], shell=False)
+ return rc == 0
+
+
+if not extract_report():
+ crm.exit_ok({'status': 'Failed to extract report'})
+
+analysis = ''
+if os.path.isfile('health-report/analysis.txt'):
+ analysis = open('health-report/analysis.txt').read()
+
+crm.exit_ok({'status': 'OK', 'analysis': analysis})
diff --git a/scripts/health/main.yml b/scripts/health/main.yml
new file mode 100644
index 0000000..7c59bdd
--- /dev/null
+++ b/scripts/health/main.yml
@@ -0,0 +1,16 @@
+version: 2.2
+category: Basic
+shortdesc: Verify health and configuration
+longdesc: |
+  Detects issues with the cluster by creating and
+  analysing a cluster report.
+
+ Requires SSH access between cluster nodes. This command is
+ also available from the command line as "crm cluster health".
+actions:
+ - collect: collect.py
+ shortdesc: Collect information
+ - apply_local: hahealth.py
+ shortdesc: Run cluster health check
+ - report: report.py
+ shortdesc: Report cluster state
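+
+# This wizard backs "crm cluster health"; it can also be invoked directly:
+#
+#   crm script run health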
diff --git a/scripts/health/report.py b/scripts/health/report.py
new file mode 100755
index 0000000..51e11d2
--- /dev/null
+++ b/scripts/health/report.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python3
+import os
+import crm_script
+data = crm_script.get_input()
+health_report = data[1]
+
+print("Processing collected information...")
+
+CORE_PACKAGES = ['corosync', 'pacemaker', 'resource-agents']
+
+warnings = []
+errors = []
+
+
+def warn(fmt, *args):
+ warnings.append(fmt % args)
+
+
+def error(fmt, *args):
+ errors.append(fmt % args)
+
+
+# {package: {version: [hosts]}} - used to detect differing package versions across nodes
+rpm_versions = {}
+
+LOW_UPTIME = 60.0
+HIGH_LOAD = 1.0
+
+for node, info in health_report.items():
+ if node != info['system']['hostname']:
+ error("Hostname mismatch: %s is not %s" %
+ (node, info['system']['hostname']))
+
+ if float(info['system']['uptime']) < LOW_UPTIME:
+ warn("%s: Uptime is low: %ss" % (node, info['system']['uptime']))
+
+ if float(info['system']['loadavg']) > HIGH_LOAD:
+ warn("%s: 15 minute load average is %s" % (node, info['system']['loadavg']))
+
+ for rpm in info['rpm']:
+ if 'error' in rpm:
+ if rpm['name'] not in rpm_versions:
+ rpm_versions[rpm['name']] = {rpm['error']: [node]}
+ else:
+ versions = rpm_versions[rpm['name']]
+ if rpm['error'] in versions:
+ versions[rpm['error']].append(node)
+ else:
+ versions[rpm['error']] = [node]
+ else:
+ if rpm['name'] not in rpm_versions:
+ rpm_versions[rpm['name']] = {rpm['version']: [node]}
+ else:
+ versions = rpm_versions[rpm['name']]
+ if rpm['version'] in versions:
+ versions[rpm['version']].append(node)
+ else:
+ versions[rpm['version']] = [node]
+ for disk, use in info['disk']:
+ use = int(use[:-1])
+ if use > 90:
+ warn("On %s, disk %s usage is %s%%", node, disk, use)
+
+ for logfile, state in info['logrotate'].items():
+ if not state:
+ warn("%s: No log rotation configured for %s" % (node, logfile))
+
+for cp in CORE_PACKAGES:
+ if cp not in rpm_versions:
+ error("Core package '%s' not installed on any node", cp)
+
+for name, versions in rpm_versions.items():
+ if len(versions) > 1:
+ desc = ', '.join('%s (%s)' % (v, ', '.join(nodes)) for v, nodes in list(versions.items()))
+ warn("Package %s: Versions differ! %s", name, desc)
+
+ all_hosts = set(sum([hosts for hosts in list(versions.values())], []))
+ for node in list(health_report.keys()):
+ if len(all_hosts) > 0 and node not in all_hosts:
+ warn("Package '%s' not installed on host '%s'" % (name, node))
+
+
+def compare_system(systems):
+ def check(value, msg):
+        # use .get() so a key missing from the collected data (e.g. 'distver')
+        # does not raise KeyError
+        vals = set([system.get(value) for host, system in systems])
+        if len(vals) > 1:
+            info = ', '.join('%s: %s' % (h, system.get(value)) for h, system in systems)
+ warn("%s: %s" % (msg, info))
+
+ check('machine', 'Architecture differs')
+ check('release', 'Kernel release differs')
+ check('distname', 'Distribution differs')
+ check('distver', 'Distribution version differs')
+ # check('version', 'Kernel version differs')
+
+
+def compare_files(systems):
+ keys = set()
+ for host, files in systems:
+ keys.update(list(files.keys()))
+ for filename in keys:
+ vals = set([files.get(filename) for host, files in systems])
+ if len(vals) > 1:
+ info = ', '.join('%s: %s' % (h, files.get(filename)) for h, files in systems)
+ warn("%s: %s" % ("Files differ", info))
+
+
+# materialized as lists because compare_system/compare_files iterate them more than once
+compare_system([(h, info['system']) for h, info in health_report.items()])
+compare_files([(h, info['files']) for h, info in health_report.items()])
+
+if crm_script.output(2):
+ report = crm_script.output(2)
+ status = report.get('status')
+ analysis = report.get('analysis')
+ if status and not analysis:
+ warn("Cluster report: %s" % (status))
+ elif analysis:
+ print("INFO: Cluster report:")
+ print(analysis)
+ else:
+ warn("No cluster report generated")
+
+if errors:
+ for e in errors:
+ print("ERROR:", e)
+if warnings:
+ for w in warnings:
+ print("WARNING:", w)
+
+if not errors and not warnings:
+ print("No issues found.")
+
+workdir = os.path.dirname(crm_script.__file__)
+print("\nINFO: health-report in directory \"%s\"" % workdir)
diff --git a/scripts/libvirt/main.yml b/scripts/libvirt/main.yml
new file mode 100644
index 0000000..d982d9f
--- /dev/null
+++ b/scripts/libvirt/main.yml
@@ -0,0 +1,66 @@
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+shortdesc: STONITH for libvirt (kvm / Xen)
+longdesc: >
+ Uses libvirt as a STONITH device to fence a guest node.
+ Create a separate resource for each guest node in the cluster.
+
+ Note that the recommended fencing mechanism is SBD whenever
+ a shared storage device (like a SAN) is available.
+category: Stonith
+parameters:
+ - name: id
+ shortdesc: Resource ID (Name)
+ example: stonith-libvirt
+ required: true
+ type: resource
+ - name: target
+ shortdesc: Node to Manage With STONITH Device
+ type: resource
+ required: true
+ - name: hostlist
+ shortdesc: "List of controlled hosts: hostname[:domain_id].."
+ longdesc: >
+ The optional domain_id defaults to the hostname.
+ type: string
+ required: true
+ - name: hypervisor_uri
+ longdesc: >
+ URI for connection to the hypervisor.
+ driver[+transport]://[username@][hostlist][:port]/[path][?extraparameters]
+ e.g.
+ qemu+ssh://my_kvm_server.mydomain.my/system (uses ssh for root)
+ xen://my_kvm_server.mydomain.my/ (uses TLS for client)
+
+ virsh must be installed (e.g. libvirt-client package) and access control must
+ be configured for your selected URI.
+ example: qemu+ssh://my_kvm_server.example.com/system
+ required: true
+ - name: reset_method
+ required: false
+ example: power_cycle
+ type: string
+ shortdesc: Guest Reset Method
+ longdesc: >
+ A guest reset may be done by a sequence of off and on commands
+ (power_cycle) or by the reboot command. Which method works
+      depends on the hypervisor and the guest configuration management.
+ - name: install
+ shortdesc: Enable to Install Required Packages
+ type: boolean
+ required: false
+ value: false
+actions:
+ - install: cluster-glue libvirt-client
+ nodes: all
+ when: install
+ - cib: |
+ primitive {{id}}-{{target}} stonith:external/libvirt
+ params
+ hostlist="{{hostlist}}"
+ hypervisor_uri="{{hypervisor_uri}}"
+ {{#reset_method}}reset_method="{{reset_method}}"{{/reset_method}}
+ op start timeout=60s
+ location l-{{id}}-{{target}} {{id}}-{{target}} -inf: {{target}}
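+
+# Invocation sketch for a guest node "node1" (node name, hostlist and URI are
+# placeholders; the URI format is described in the hypervisor_uri parameter above):
+#
+#   crm script run libvirt id=stonith-libvirt target=node1 \
+#     hostlist="node1" hypervisor_uri="qemu+ssh://my_kvm_server.example.com/system"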
diff --git a/scripts/lvm-drbd/main.yml b/scripts/lvm-drbd/main.yml
new file mode 100644
index 0000000..f435be7
--- /dev/null
+++ b/scripts/lvm-drbd/main.yml
@@ -0,0 +1,62 @@
+# Copyright (C) 2016 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: File System
+shortdesc: LVM Group on DRBD
+longdesc: |
+ Configure a LVM resource group on top of DRBD.
+
+ A DRBD primitive and Multi-state resource is used to replicate
+ data between the nodes.
+
+ LVM and file system resources are used to make the file systems
+ available on the Primary node.
+
+ For more details on what needs to be prepared to use
+ this wizard, see the Highly Available NFS Storage with
+ DRBD and Pacemaker section of the SUSE Linux Enterprise
+ High Availability Extension 12 SP1 documentation.
+
+parameters:
+ - name: group_id
+ type: resource
+ required: true
+ shortdesc: Group Resource ID
+ value: g-lvm
+
+include:
+ - name: drbd
+ script: drbd
+ required: true
+ parameters:
+ - name: drbd_resource
+ value: vg1
+
+ - name: lvm
+ script: lvm
+ required: true
+ parameters:
+ - name: volgrpname
+ value: vg1
+
+ - name: example_fs
+ shortdesc: Example File System Resource
+ script: filesystem
+ required: false
+ parameters:
+ - name: device
+ value: /dev/example
+ - name: directory
+ value: /srv/example
+ - name: fstype
+ value: xfs
+
+actions:
+ - include: drbd
+ - include: lvm
+ - shortdesc: Configure LVM and File System Group and Constraints
+ cib: |
+ group {{group_id}} {{lvm:id}} {{#example_fs:id}}{{example_fs:id}}{{/example_fs:id}}
+ order o-drbd_before_{{group_id}} Mandatory: ms-{{drbd:id}}:promote {{group_id}}:start
+ colocation c-{{group_id}}_on_drbd inf: {{group_id}} ms-{{drbd:id}}:Master
diff --git a/scripts/lvm/main.yml b/scripts/lvm/main.yml
new file mode 100644
index 0000000..5f87cb0
--- /dev/null
+++ b/scripts/lvm/main.yml
@@ -0,0 +1,21 @@
+version: 2.2
+category: Script
+longdesc: >-
+ Configure a resource for managing an LVM volume group.
+
+ Does not create the referenced volume group.
+
+include:
+ - agent: ocf:heartbeat:LVM
+ name: lvm
+ parameters:
+ - name: id
+ required: true
+ value: lvm
+ type: resource
+ - name: volgrpname
+ required: true
+ type: string
+ ops: |
+ op monitor interval=130s timeout=130s
+ op stop timeout=130s on-fail=fence
diff --git a/scripts/mailto/main.yml b/scripts/mailto/main.yml
new file mode 100644
index 0000000..bcf188e
--- /dev/null
+++ b/scripts/mailto/main.yml
@@ -0,0 +1,29 @@
+version: 2.2
+shortdesc: E-Mail
+longdesc: |
+  Notifies a recipient by e-mail in the event of a resource takeover.
+category: Basic
+include:
+ - agent: ocf:heartbeat:MailTo
+ name: mailto
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ - name: email
+ type: email
+ required: true
+ - name: subject
+ type: string
+ required: false
+ ops: |
+ op start timeout="10"
+ op stop timeout="10"
+ op monitor interval="10" timeout="10"
+actions:
+ - install:
+ - mailx
+ shortdesc: Ensure mail package is installed
+ - include: mailto
+ - cib: |
+ clone c-{{id}} {{id}}
diff --git a/scripts/nfsserver-lvm-drbd/main.yml b/scripts/nfsserver-lvm-drbd/main.yml
new file mode 100644
index 0000000..ee4a93e
--- /dev/null
+++ b/scripts/nfsserver-lvm-drbd/main.yml
@@ -0,0 +1,137 @@
+# Copyright (C) 2016 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: NFS
+shortdesc: NFS Server on LVM and DRBD
+longdesc: |
+ Configure a highly available two-node NFS server on top of
+ LVM and DRBD.
+
+ A DRBD primitive and Multi-state resource is used to replicate
+ data between the nodes.
+
+  An NFS kernel server resource ensures that the NFS server daemons
+ are always available.
+
+ LVM and file system resources are used to make the file systems
+ available on the Primary node.
+
+ A virtual NFS root export is needed for NFSv4 clients.
+
+ An example NFS export is configured, corresponding to a file system
+ mounted from the LVM logical volume.
+
+ Finally, a floating IP address resource allows clients to connect to
+ the service regardless of which physical node is primary.
+
+ For more details on what needs to be prepared to use
+ this wizard, see the Highly Available NFS Storage with
+ DRBD and Pacemaker section of the SUSE Linux Enterprise
+ High Availability Extension documentation.
+
+parameters:
+ - name: nfsserver_id
+ type: resource
+ value: nfsserver
+ shortdesc: ID for the NFS Server cluster resource
+ required: true
+
+include:
+ - name: drbd
+ script: drbd
+ required: true
+ parameters:
+ - name: drbd_resource
+ value: nfs
+
+ - name: lvm
+ script: lvm
+ required: true
+ parameters:
+ - name: volgrpname
+ value: nfs
+
+ - name: example_fs
+ shortdesc: Example File System Resource
+ script: filesystem
+ required: false
+ parameters:
+ - name: device
+ value: /dev/nfs/example
+ - name: directory
+ value: /srv/nfs/example
+ - name: fstype
+ value: xfs
+
+ - name: rootfs
+ script: exportfs
+ required: false
+ shortdesc: NFSv4 Virtual File System root.
+ parameters:
+ - name: id
+ value: exportfs-root
+ - name: fsid
+ value: 0
+ - name: directory
+ value: /srv/nfs
+ - name: options
+ value: "rw,crossmnt"
+
+ - script: exportfs
+ required: false
+ shortdesc: Exported NFS mount point.
+ parameters:
+ - name: id
+ value: exportfs
+ - name: directory
+ value: /srv/nfs/example
+ - name: options
+ value: "rw,mountpoint"
+ - name: wait_for_leasetime_on_stop
+ value: true
+
+ - script: virtual-ip
+ required: false
+ shortdesc: Configure a Virtual IP address used to access the NFS mounts.
+
+actions:
+ - shortdesc: Ensure NFS packages are installed
+ install: nfs-client nfs-kernel-server nfs-utils
+
+ - shortdesc: Configure cluster resource for the NFS server
+ cib: |
+ primitive {{nfsserver_id}} \
+ systemd:nfs-server \
+ op monitor interval=30s \
+ clone cl-{{nfsserver_id}} {{nfsserver_id}}
+
+ - include: drbd
+ - include: lvm
+
+ - shortdesc: Configure LVM and File System Group and Constraints
+ cib: |
+ group g-nfs {{lvm:id}} {{#example_fs:id}}{{example_fs:id}}{{/example_fs:id}}
+ order o-drbd_before_nfs Mandatory: ms-{{drbd:id}}:promote g-nfs:start
+ colocation c-nfs_on_drbd inf: g-nfs ms-{{drbd:id}}:Master
+
+ - include: rootfs
+
+ - shortdesc: Clone Root FS Resource and Configure Constraints
+ cib: |
+ clone cl-{{rootfs:id}} {{rootfs:id}}
+ order o-root_before_nfs Mandatory: cl-{{rootfs:id}} g-nfs:start
+ colocation c-nfs_on_root inf: g-nfs cl-{{rootfs:id}}
+ when: rootfs
+ - include: exportfs
+ - shortdesc: Add ExportFS Resource to Group
+ crm: "configure modgroup g-nfs add {{exportfs:id}}"
+ when: exportfs
+ - include: virtual-ip
+ - shortdesc: Add Floating IP Address to Group
+ crm: "configure modgroup g-nfs add {{virtual-ip:id}}"
+ when: virtual-ip
+ - call: /usr/sbin/exportfs -v
+ error: Failed to configure NFS exportfs
+ shortdesc: Check Result of exportfs -v
+ sudo: true
diff --git a/scripts/nfsserver/main.yml b/scripts/nfsserver/main.yml
new file mode 100644
index 0000000..6544bf1
--- /dev/null
+++ b/scripts/nfsserver/main.yml
@@ -0,0 +1,74 @@
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: NFS
+shortdesc: NFS Server
+longdesc: |
+ Configure an NFS server. Requires an existing file system resource,
+ for example a file system running on LVM on DRBD.
+
+parameters:
+ - name: base-id
+ required: true
+ shortdesc: Base File System Resource ID
+ longdesc: The ID of an existing file system resource.
+ type: resource
+ value: base-fs
+
+include:
+ - name: rootfs
+ script: exportfs
+ required: false
+ shortdesc: NFSv4 Virtual File System Root
+ parameters:
+ - name: id
+ value: exportfs-root
+ - name: fsid
+ value: 0
+ - name: directory
+ value: /srv/nfs
+ - name: options
+ value: "rw,crossmnt"
+
+ - script: exportfs
+ required: true
+ shortdesc: Exported NFS Mount Point
+ parameters:
+ - name: id
+ value: exportfs
+ - name: directory
+ value: /srv/nfs/example
+ - name: options
+ value: "rw,mountpoint"
+ - name: wait_for_leasetime_on_stop
+ value: true
+
+ - script: virtual-ip
+ required: false
+ shortdesc: Virtual IP Address Used to Access the NFS Mounts
+
+actions:
+ - crm: "configure show {{base-id}}"
+ shortdesc: Ensure That the File System Resource Exists
+ - install: nfs-client nfs-kernel-server nfs-utils
+ shortdesc: Install NFS Packages
+ - service:
+ - nfsserver: enable
+ - nfsserver: start
+ - include: rootfs
+ - include: exportfs
+ - include: virtual-ip
+ - cib: |
+ group g-nfs {{exportfs:id}} {{virtual-ip:id}}
+ order base-then-nfs Mandatory: {{base-id}} g-nfs
+ colocation nfs-with-base inf: g-nfs {{base-id}}
+ {{#rootfs}}
+ clone c-{{rootfs:id}} {{rootfs:id}}
+ order rootfs-before-nfs Mandatory: c-{{rootfs:id}} g-nfs:start
+ colocation nfs-with-rootfs inf: g-nfs c-{{rootfs:id}}
+ {{/rootfs}}
+ - call: /usr/sbin/exportfs -v
+ error: Failed to configure NFS exportfs
+ shortdesc: Check Result of exportfs -v
+ sudo: true
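+
+# Invocation sketch (assumes an existing file system resource "base-fs"; the
+# export directory and IP address are placeholders; included-script parameters
+# use the "exportfs:" and "virtual-ip:" prefixes):
+#
+#   crm script run nfsserver base-id=base-fs \
+#     exportfs:directory=/srv/nfs/example virtual-ip:ip=192.168.1.100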
diff --git a/scripts/nginx/main.yml b/scripts/nginx/main.yml
new file mode 100644
index 0000000..59c8d1f
--- /dev/null
+++ b/scripts/nginx/main.yml
@@ -0,0 +1,63 @@
+# Copyright (C) 2017 Xin Liang
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Server
+shortdesc: Nginx Webserver
+longdesc: |
+ Configure a resource group containing a virtual IP address and
+ an instance of the Nginx web server.
+
+ You can optionally configure a file system resource which will be
+ mounted before the web server is started.
+
+ You can also optionally configure a database resource which will
+ be started before the web server but after mounting the optional
+ file system.
+include:
+ - agent: ocf:heartbeat:nginx
+ name: nginx
+ longdesc: |
+    The Nginx configuration file specified here must be available via the
+    same path on all cluster nodes. The nginx.service unit should be
+    disabled on all cluster nodes, and the "server_name" option in the
+    Nginx configuration file should correspond to the virtual IP address.
+ ops: |
+ op start timeout="40"
+ op stop timeout="60"
+ op monitor interval="10" timeout="30"
+ - script: virtual-ip
+ shortdesc: The IP address configured here will start before the Nginx instance.
+ parameters:
+ - name: id
+ value: "{{id}}-vip"
+ - script: filesystem
+ shortdesc: Optional file system mounted before the web server is started.
+ required: false
+ - script: database
+ shortdesc: Optional database started before the web server is started.
+ required: false
+parameters:
+ - name: install
+ type: boolean
+ shortdesc: Install and configure nginx
+ value: false
+actions:
+ - install:
+ - nginx
+ shortdesc: Install the nginx package
+ when: install
+ - service:
+    - nginx: disable
+ shortdesc: Let cluster manage nginx
+ when: install
+ - include: filesystem
+ - include: database
+ - include: virtual-ip
+ - include: nginx
+ - cib: |
+ group g-{{id}}
+ {{filesystem:id}}
+ {{database:id}}
+ {{virtual-ip:id}}
+ {{id}}
diff --git a/scripts/ocfs2/main.yml b/scripts/ocfs2/main.yml
new file mode 100644
index 0000000..c3000dd
--- /dev/null
+++ b/scripts/ocfs2/main.yml
@@ -0,0 +1,76 @@
+# Copyright (C) 2009 Dejan Muhamedagic
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: File System
+shortdesc: OCFS2 File System
+longdesc: |
+ Configure an OCFS2 File System resource and add
+ it to a cloned DLM base group. OCFS2 uses the
+ cluster membership services from Pacemaker which
+ run in user space. Therefore, DLM needs to be
+ configured as a clone resource that is present on
+ each node in the cluster.
+
+ The file system resource should be added to a cloned
+ group which includes the DLM resource. This wizard
+ can optionally create both the required DLM resource
+ and the cloned group. The wizard can be reused to create
+ additional OCFS2 file system resources by setting the
+ group name to the name of an already-created cloned group.
+
+ If you are using cLVM, create the DLM resource and clone
+ group using the cLVM wizard. OCFS2 file system resources can
+ then be added to the group using this wizard.
+
+parameters:
+ - name: id
+ shortdesc: OCFS2 File System Resource ID
+ example: bigfs
+ type: resource
+ required: true
+ - name: directory
+ shortdesc: Mount Point
+ example: /mnt/bigfs
+ type: string
+ required: true
+ - name: device
+ shortdesc: Device
+ type: string
+ required: true
+ - name: options
+ shortdesc: Mount Options
+ type: string
+ - name: dlm
+ shortdesc: Create DLM Resource and Cloned Group
+ longdesc: If set, create the DLM resource and cloned resource group.
+ type: boolean
+ default: true
+ - name: group
+ shortdesc: Cloned Group Resource ID
+ longdesc: ID of cloned group
+ required: false
+ type: resource
+ default: g-dlm
+
+actions:
+ - when: dlm
+ cib: |
+ primitive dlm ocf:pacemaker:controld
+ op start timeout=90
+ op stop timeout=60
+ group {{group}} dlm
+ clone c-dlm {{group}} meta interleave=true
+ - cib: |
+ primitive {{id}} ocf:heartbeat:Filesystem
+ directory="{{directory}}"
+ fstype="ocfs2"
+ device="{{device}}"
+ {{#options}}options="{{options}}"{{/options}}
+ op start timeout=60s
+ op stop timeout=60s
+ op monitor interval=20s timeout=40s
+
+ - crm: configure modgroup {{group}} add {{id}}
+ shortdesc: Add the OCFS2 File System to the Cloned Group
diff --git a/scripts/oracle/main.yml b/scripts/oracle/main.yml
new file mode 100644
index 0000000..4a79258
--- /dev/null
+++ b/scripts/oracle/main.yml
@@ -0,0 +1,51 @@
+version: 2.2
+category: Database
+shortdesc: Oracle Database
+longdesc: Configure an Oracle Database cluster resource.
+parameters:
+ - name: id
+ required: true
+ shortdesc: Resource ID
+ longdesc: Unique ID for the database cluster resource.
+ type: resource
+ value: oracle
+ - name: sid
+ required: true
+ shortdesc: Database SID
+ type: string
+ value: OracleDB
+ - name: listener
+ shortdesc: Listener.
+ required: true
+ type: string
+ value: LISTENER
+ - name: home
+ required: true
+ shortdesc: Database Home.
+ type: string
+ value: /srv/oracledb
+ - name: user
+ required: true
+ shortdesc: Database User.
+ type: string
+ default: oracle
+actions:
+ - cib: |
+ primitive lsn-{{id}} ocf:heartbeat:oralsnr
+ params
+ sid="{{sid}}"
+ home="{{home}}"
+ user="{{user}}"
+ listener="{{listener}}"
+ op monitor interval="30" timeout="60" depth="0"
+
+ primitive {{id}} ocf:heartbeat:oracle
+ params
+ sid="{{sid}}"
+ home="{{home}}"
+ user="{{user}}"
+ op monitor interval="120s"
+
+ colocation lsn-with-{{id}} inf: {{id}} lsn-{{id}}
+ order lsn-before-{{id}} Mandatory: lsn-{{id}} {{id}}
+ \ No newline at end of file
diff --git a/scripts/raid-lvm/main.yml b/scripts/raid-lvm/main.yml
new file mode 100644
index 0000000..405168f
--- /dev/null
+++ b/scripts/raid-lvm/main.yml
@@ -0,0 +1,25 @@
+version: 2.2
+category: File System
+shortdesc: RAID Hosting LVM
+longdesc: "Configure a RAID 1 host based mirror together with a cluster manager LVM volume group and LVM volumes."
+parameters:
+ - name: id
+ shortdesc: RAID and LVM Group ID
+ longdesc: File systems that should be mounted in the LVM can be added to this group resource.
+ type: resource
+ value: g-raid
+ required: true
+include:
+ - script: raid1
+ parameters:
+ - name: raidconf
+ value: /etc/mdadm.conf
+ type: string
+ - name: raiddev
+ value: /dev/md0
+ type: string
+ - script: lvm
+actions:
+ - include: lvm
+ - include: raid1
+ - cib: group {{id}} {{raid1:id}} {{lvm:id}} meta target-role=stopped
diff --git a/scripts/raid1/main.yml b/scripts/raid1/main.yml
new file mode 100644
index 0000000..47ff607
--- /dev/null
+++ b/scripts/raid1/main.yml
@@ -0,0 +1,17 @@
+version: 2.2
+category: Script
+include:
+ - agent: ocf:heartbeat:Raid1
+ name: raid1
+ parameters:
+ - name: id
+ required: true
+ value: raid1
+ - name: raidconf
+ required: true
+ type: string
+ - name: raiddev
+ required: true
+ type: string
+ ops: |
+ op monitor interval=60s timeout=130s on-fail=fence
diff --git a/scripts/sap-as/main.yml b/scripts/sap-as/main.yml
new file mode 100644
index 0000000..ccb857e
--- /dev/null
+++ b/scripts/sap-as/main.yml
@@ -0,0 +1,70 @@
+version: 2.2
+category: SAP
+shortdesc: SAP ASCS Instance
+longdesc: |
+ Configure a SAP ASCS instance including:
+
+ 1) Virtual IP address for the SAP ASCS instance,
+
+ 2) A file system on shared storage (/usr/sap/SID/ASCS##),
+
+ 3) SAPInstance for ASCS.
+
+parameters:
+ - name: id
+ shortdesc: SAP ASCS Resource Group ID
+ longdesc: Unique ID for the SAP ASCS instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sap_NA0_sapna0as
+
+include:
+ - script: sapinstance
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_ASCS00_sapna0as
+ - name: InstanceName
+ value: NA0_ASCS00_sapna0as
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_ASCS00_sapna0as"
+ - script: virtual-ip
+ shortdesc: The Virtual IP address configured here will be for the SAP ASCS instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0as
+ - name: ip
+ value: 172.17.2.53
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ shortdesc: "File system resource for the /usr/sap/SID/ASCS## directory"
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0as
+ - name: directory
+ value: "/usr/sap/NA0/ASCS00"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+actions:
+ - include: sapinstance
+ - include: virtual-ip
+ - include: filesystem
+ - cib:
+ group {{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{sapinstance:id}}
+ meta target-role=Stopped
diff --git a/scripts/sap-ci/main.yml b/scripts/sap-ci/main.yml
new file mode 100644
index 0000000..7c3468d
--- /dev/null
+++ b/scripts/sap-ci/main.yml
@@ -0,0 +1,70 @@
+version: 2.2
+category: SAP
+shortdesc: SAP Central Instance
+longdesc: |
+ Configure a SAP Central Instance including:
+
+ 1) Virtual IP address for the SAP Central instance,
+
+ 2) A file system on shared storage (/usr/sap/SID/DVEBMGS##),
+
+ 3) SAPInstance for the Central Instance.
+
+parameters:
+ - name: id
+ shortdesc: SAP Central Resource Group ID
+ longdesc: Unique ID for the SAP Central instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sap_NA0_sapna0ci
+
+include:
+ - script: sapinstance
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_DVEBMGS01_sapna0ci
+ - name: InstanceName
+ value: NA0_DVEBMGS01_sapna0ci
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_DVEBMGS01_sapna0ci"
+ - script: virtual-ip
+ shortdesc: The Virtual IP address configured here will be for the SAP Central instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0ci
+ - name: ip
+ value: 172.17.2.55
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ shortdesc: "File system resource for the /usr/sap/SID/DVEBMGS## directory."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0ci
+ - name: directory
+ value: "/usr/sap/NA0/DVEBMGS01"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+actions:
+ - include: sapinstance
+ - include: virtual-ip
+ - include: filesystem
+ - cib:
+ group {{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{sapinstance:id}}
+ meta target-role=Stopped
diff --git a/scripts/sap-db/main.yml b/scripts/sap-db/main.yml
new file mode 100644
index 0000000..b472f3f
--- /dev/null
+++ b/scripts/sap-db/main.yml
@@ -0,0 +1,63 @@
+version: 2.2
+category: SAP
+shortdesc: SAP Database Instance
+longdesc: |
+ Configure a SAP database instance including:
+
+ 1) A virtual IP address for the SAP database instance,
+
+ 2) A file system on shared storage (/sapdb),
+
+ 3) SAPinstance for the database.
+
+parameters:
+ - name: id
+ shortdesc: SAP Database Resource Group ID
+ longdesc: Unique ID for the SAP Database instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sapdb_NA0
+
+include:
+ - script: sapdb
+ required: true
+ - script: virtual-ip
+ shortdesc: The Virtual IP address configured here will be for the SAP Database instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0db
+ - name: ip
+ value: 172.17.2.54
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ shortdesc: "File system resource for the SAP database (typically /sapdb)."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0db
+ - name: directory
+ value: "/sapdb"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+actions:
+ - include: sapdb
+ - include: virtual-ip
+ - include: filesystem
+ - cib:
+ group {{id}}
+ {{virtual-ip:id}}
+ {{filesystem:id}}
+ {{sapdb:id}}
+ meta target-role=Stopped
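
Each of these wizards creates its resource group with meta target-role=Stopped, so nothing starts until the administrator explicitly allows it. A short follow-up sketch using the default group ID from this wizard:

    # review the generated group, then allow it to start
    crm configure show grp_sapdb_NA0
    crm resource start grp_sapdb_NA0
    # one-shot status display, including resources that are still inactive
    crm_mon -1 -r
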
diff --git a/scripts/sap-simple-stack-plus/main.yml b/scripts/sap-simple-stack-plus/main.yml
new file mode 100644
index 0000000..3f1e996
--- /dev/null
+++ b/scripts/sap-simple-stack-plus/main.yml
@@ -0,0 +1,220 @@
+version: 2.2
+category: SAP
+shortdesc: SAP SimpleStack+ Instance
+longdesc: |
+ Configure a SAP instance including:
+
+ 1) Virtual IP addresses for each of the SAP instance services - ASCS, DB and CI,
+
+ 2) A RAID 1 host based mirror,
+
+ 3) A cluster manager LVM volume group and LVM volumes on the RAID 1 host based mirror,
+
+ 4) File systems on shared storage for /sapmnt, /sapdb, /usr/sap/SID/ASCS## and /usr/sap/SID/DVEBMGS##,
+
+ 5) SAPInstance resources for the ASCS and the Central Instance, and a SAPDatabase resource for the Database.
+
+ The difference between this and the SimpleStack is that the ASCS and CI have their own
+ volumes / file systems / mount points rather than just one volume / file system / mount point on /usr/sap.
+
+parameters:
+ - name: id
+ shortdesc: SAP SimpleStack+ Resource Group ID
+ longdesc: Unique ID for the SAP SimpleStack+ instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sap_NA0
+
+include:
+ - script: raid1
+ required: true
+ parameters:
+ - name: raidconf
+ value: "/etc/mdadm.conf"
+ - name: raiddev
+ value: "/dev/md0"
+
+ - script: lvm
+ required: true
+ shortdesc: LVM logical volumes for the SAP file systems.
+ parameters:
+ - name: volgrpname
+ value: sapvg
+
+ - script: filesystem
+ name: filesystem-sapmnt
+ required: true
+ shortdesc: File system resource for the sapmnt directory.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapmnt
+ - name: directory
+ value: "/sapmnt"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: filesystem
+ name: filesystem-usrsap
+ required: true
+ shortdesc: File system resource for the /usr/sap directory.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_usrsap
+ - name: directory
+ value: "/usr/sap"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapdb
+ required: true
+
+ - script: virtual-ip
+ name: virtual-ip-db
+ shortdesc: The Virtual IP address configured here will be for the SAP Database instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0db
+ - name: ip
+ value: 172.17.2.54
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ name: filesystem-db
+ shortdesc: "File system resource for the SAP database (typically /sapdb)."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0db
+ - name: directory
+ value: "/sapdb"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapinstance
+ name: sapinstance-as
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_ASCS00_sapna0as
+ - name: InstanceName
+ value: NA0_ASCS00_sapna0as
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_ASCS00_sapna0as"
+ - script: virtual-ip
+ name: virtual-ip-as
+ shortdesc: The Virtual IP address configured here will be for the SAP ASCS instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0as
+ - name: ip
+ value: 172.17.2.53
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ name: filesystem-as
+ shortdesc: "File system resource for the /usr/sap/SID/ASCS## directory."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0as
+ - name: directory
+ value: "/usr/sap/NA0/ASCS00"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapinstance
+ name: sapinstance-ci
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_DVEBMGS01_sapna0ci
+ - name: InstanceName
+ value: NA0_DVEBMGS01_sapna0ci
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_DVEBMGS01_sapna0ci"
+ - script: virtual-ip
+ name: virtual-ip-ci
+ shortdesc: The Virtual IP address configured here will be for the SAP Central instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0ci
+ - name: ip
+ value: 172.17.2.55
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+ - script: filesystem
+ name: filesystem-ci
+ shortdesc: "File system resource for the /usr/sap/SID/DVEBMGS## directory."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0ci
+ - name: directory
+ value: "/usr/sap/NA0/DVEBMGS01"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+actions:
+ - include: raid1
+ - include: lvm
+ - include: filesystem-sapmnt
+ - include: filesystem-db
+ - include: filesystem-ci
+ - include: filesystem-as
+ - include: virtual-ip-ci
+ - include: virtual-ip-db
+ - include: virtual-ip-as
+ - include: sapdb
+ - include: sapinstance-as
+ - include: sapinstance-ci
+ - cib:
+ group {{id}}
+ {{raid1:id}}
+ {{lvm:id}}
+ {{virtual-ip-db:id}}
+ {{filesystem-sapmnt:id}}
+ {{filesystem-db:id}}
+ {{sapdb:id}}
+ {{virtual-ip-as:id}}
+ {{filesystem-as:id}}
+ {{sapinstance-as:id}}
+ {{virtual-ip-ci:id}}
+ {{filesystem-ci:id}}
+ {{sapinstance-ci:id}}
+ meta target-role=Stopped
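
The raid1 and lvm steps manage storage that must already exist; assembling the mirror and creating the volume group and logical volumes is a manual prerequisite. A rough sketch in which the two /dev/disk/by-id/... paths, the volume name and the size are placeholders:

    # build the RAID 1 host-based mirror from two shared LUNs and record it in mdadm.conf
    mdadm --create /dev/md0 --level=1 --raid-devices=2 \
        /dev/disk/by-id/lun-a /dev/disk/by-id/lun-b
    mdadm --detail --scan >> /etc/mdadm.conf
    # put a volume group on the mirror and carve out one volume per file system
    pvcreate /dev/md0
    vgcreate sapvg /dev/md0
    lvcreate -n sapmnt -L 10G sapvg
    mkfs.ext3 /dev/sapvg/sapmnt
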
diff --git a/scripts/sap-simple-stack/main.yml b/scripts/sap-simple-stack/main.yml
new file mode 100644
index 0000000..654dd47
--- /dev/null
+++ b/scripts/sap-simple-stack/main.yml
@@ -0,0 +1,183 @@
+---
+version: 2.2
+category: SAP
+shortdesc: SAP SimpleStack Instance
+longdesc: |
+ Configure a SAP instance including:
+
+ 1) Virtual IP addresses for each of the SAP instance services - ASCS, DB and CI,
+
+ 2) A RAID 1 host based mirror,
+
+ 3) A cluster manager LVM volume group and LVM volumes on the RAID 1 host based mirror,
+
+ 4) File systems on shared storage for /sapmnt, /sapdb and /usr/sap,
+
+ 5) SAPInstance resources for the ASCS and the Central Instance, and a SAPDatabase resource for the Database.
+
+parameters:
+ - name: id
+ shortdesc: SAP Simple Stack Resource Group ID
+ longdesc: Unique ID for the SAP SimpleStack instance resource group in the cluster.
+ required: true
+ type: resource
+ value: grp_sap_NA0
+
+include:
+ - script: raid1
+ required: true
+ parameters:
+ - name: raidconf
+ value: "/etc/mdadm.conf"
+ - name: raiddev
+ value: "/dev/md0"
+
+ - script: lvm
+ required: true
+ shortdesc: LVM logical volumes for the SAP file systems.
+ parameters:
+ - name: volgrpname
+ value: sapvg
+
+ - script: filesystem
+ name: filesystem-sapmnt
+ required: true
+ shortdesc: File system resource for the sapmnt directory.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapmnt
+ - name: directory
+ value: "/sapmnt"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: filesystem
+ name: filesystem-usrsap
+ required: true
+ shortdesc: File system resource for the /usr/sap directory.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_usrsap
+ - name: directory
+ value: "/usr/sap"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapdb
+ required: true
+
+ - script: virtual-ip
+ name: virtual-ip-db
+ shortdesc: The Virtual IP address configured here will be for the SAP Database instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0db
+ - name: ip
+ value: 172.17.2.54
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+
+ - script: filesystem
+ name: filesystem-db
+ shortdesc: "File system resource for the SAP database (typically /sapdb)."
+ longdesc: >-
+ If a file system does not already exist on the block device
+ specified here, you will need to run mkfs to create it, prior
+ to starting the file system resource. You will also need
+ to create the mount point directory on all cluster nodes.
+ parameters:
+ - name: id
+ value: rsc_fs_NA0_sapna0db
+ - name: directory
+ value: "/sapdb"
+ - name: options
+ value: "noatime,barrier=0,data=writeback"
+ ops: |
+ op stop timeout=300
+ op monitor interval=30 timeout=130
+
+ - script: sapinstance
+ name: sapinstance-as
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_ASCS00_sapna0as
+ - name: InstanceName
+ value: NA0_ASCS00_sapna0as
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_ASCS00_sapna0as"
+
+ - script: virtual-ip
+ name: virtual-ip-as
+ shortdesc: The Virtual IP address configured here will be for the SAP ASCS instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0as
+ - name: ip
+ value: 172.17.2.53
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+
+ - script: sapinstance
+ name: sapinstance-ci
+ required: true
+ parameters:
+ - name: id
+ value: rsc_sapinst_NA0_DVEBMGS01_sapna0ci
+ - name: InstanceName
+ value: NA0_DVEBMGS01_sapna0ci
+ - name: START_PROFILE
+ value: "/usr/sap/NA0/SYS/profile/START_DVEBMGS01_sapna0ci"
+
+ - script: virtual-ip
+ name: virtual-ip-ci
+ shortdesc: The Virtual IP address configured here will be for the SAP Central instance.
+ required: true
+ parameters:
+ - name: id
+ value: rsc_ip_NA0_sapna0ci
+ - name: ip
+ value: 172.17.2.55
+ - name: cidr_netmask
+ value: 24
+ - name: nic
+ value: eth0
+
+actions:
+ - include: raid1
+ - include: lvm
+ - include: filesystem-usrsap
+ - include: filesystem-sapmnt
+ - include: filesystem-db
+ - include: virtual-ip-ci
+ - include: virtual-ip-db
+ - include: virtual-ip-as
+ - include: sapdb
+ - include: sapinstance-as
+ - include: sapinstance-ci
+ - cib:
+ group {{id}}
+ {{raid1:id}}
+ {{lvm:id}}
+ {{virtual-ip-ci:id}}
+ {{virtual-ip-db:id}}
+ {{virtual-ip-as:id}}
+ {{filesystem-usrsap:id}}
+ {{filesystem-sapmnt:id}}
+ {{filesystem-db:id}}
+ {{sapdb:id}}
+ {{sapinstance-as:id}}
+ {{sapinstance-ci:id}}
+ meta target-role=Stopped
diff --git a/scripts/sapdb/main.yml b/scripts/sapdb/main.yml
new file mode 100644
index 0000000..db67785
--- /dev/null
+++ b/scripts/sapdb/main.yml
@@ -0,0 +1,32 @@
+version: 2.2
+category: Script
+shortdesc: SAP Database Instance
+longdesc: Create a single SAP Database Instance.
+
+parameters:
+ - name: id
+ required: true
+ shortdesc: Resource ID
+ longdesc: Unique ID for this SAP database resource in the cluster.
+ type: resource
+ value: rsc_sapdb_NA0
+ - name: SID
+ required: true
+ shortdesc: Database SID
+ longdesc: The SID for the database.
+ type: string
+ value: NA0
+ - name: DBTYPE
+ required: true
+ shortdesc: Database Type
+ longdesc: The type of database (for example ADA for MaxDB, ORA for Oracle or DB6 for DB2).
+ value: ADA
+ type: string
+
+actions:
+ - cib: |
+ primitive {{id}} ocf:heartbeat:SAPDatabase
+ params SID="{{SID}}" DBTYPE="{{DBTYPE}}"
+ op monitor interval="120" timeout="60" start-delay="180"
+ op start timeout="1800"
+ op stop timeout="1800"
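
For reference, with the default values above the CIB template renders to roughly this crm shell command:

    crm configure primitive rsc_sapdb_NA0 ocf:heartbeat:SAPDatabase \
        params SID="NA0" DBTYPE="ADA" \
        op monitor interval="120" timeout="60" start-delay="180" \
        op start timeout="1800" \
        op stop timeout="1800"
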
diff --git a/scripts/sapinstance/main.yml b/scripts/sapinstance/main.yml
new file mode 100644
index 0000000..b6da1b5
--- /dev/null
+++ b/scripts/sapinstance/main.yml
@@ -0,0 +1,48 @@
+version: 2.2
+category: Script
+shortdesc: SAP Instance
+longdesc: Create a single SAP Instance.
+
+parameters:
+ - name: id
+ required: true
+ shortdesc: Resource ID
+ longdesc: Unique ID for this SAP instance resource in the cluster.
+ type: resource
+ value: sapinstance
+ - name: InstanceName
+ required: true
+ shortdesc: Instance Name
+ longdesc: The name of the SAP instance.
+ type: string
+ value: sapinstance
+ - name: START_PROFILE
+ required: true
+ shortdesc: Start Profile
+ longdesc: This defines the path and the file name of the SAP start profile of this particular instance.
+ type: string
+ - name: AUTOMATIC_RECOVER
+ required: true
+ shortdesc: Automatic Recover
+ longdesc: >-
+ The SAPInstance resource agent tries to recover a failed start
+ attempt automatically one time. This is done by killing running
+ instance processes, removing the kill.sap file and executing
+ cleanipc. Sometimes a crashed SAP instance leaves some
+ processes and/or shared memory segments behind. Setting this
+ option to true will try to remove those leftovers during a
+ start operation, reducing manual work for the administrator.
+ type: boolean
+ value: true
+
+actions:
+ - cib: |
+ primitive {{id}} ocf:heartbeat:SAPInstance
+ params
+ InstanceName="{{InstanceName}}"
+ AUTOMATIC_RECOVER="{{AUTOMATIC_RECOVER}}"
+ START_PROFILE="{{START_PROFILE}}"
+ op monitor interval="180" timeout="60" start-delay="240"
+ op start timeout="240"
+ op stop timeout="240" on-fail="block"
diff --git a/scripts/sbd-device/main.yml b/scripts/sbd-device/main.yml
new file mode 100644
index 0000000..27fe8d0
--- /dev/null
+++ b/scripts/sbd-device/main.yml
@@ -0,0 +1,63 @@
+# Copyright (C) 2016 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Script
+shortdesc: "Create SBD Device"
+longdesc: |
+ Optional step to initialize and configure the SBD Device.
+
+ Prerequisites:
+
+ * The environment must have shared storage reachable by all nodes.
+
+parameters:
+ - name: device
+ shortdesc: Shared Storage Device
+ example: /dev/disk/by-id/...
+ required: true
+ type: string
+
+ - name: watchdog
+ shortdesc: Watchdog Device
+ value: /dev/watchdog
+ type: string
+
+actions:
+ - shortdesc: Verify configuration
+ sudo: true
+ call: |
+ #!/bin/sh
+ set -e
+ systemctl is-active --quiet sbd && { echo "ERROR: SBD daemon is already running"; exit 1; } || true
+ test -b "{{device}}" || { echo "ERROR: Not a device: {{device}}"; exit 1; }
+ lsmod | egrep "(wd|dog)" || { echo "ERROR: No watchdog kernel module loaded"; exit 1; }
+ test -c "{{watchdog}}" || { echo "ERROR: Not a device: {{watchdog}}"; exit 1; }
+
+ - shortdesc: Initialize the SBD device
+ sudo: true
+ nodes: local
+ call: |
+ #!/bin/sh
+ sbd -d "{{device}}" dump >/dev/null 2>&1 || sbd -d "{{device}}" create
+ # sbd allocate "$(uname -n)" # FIXME
+
+ - shortdesc: Verify SBD Device
+ call: |
+ #!/bin/sh
+ sbd -d "{{device}}" list
+
+ - shortdesc: Configure SBD Daemon
+ sudo: true
+ call: |
+ #!/bin/sh
+ [ -f "/etc/sysconfig/sbd" ] && rm -f /etc/sysconfig/sbd || true
+ cat > /etc/sysconfig/sbd <<EOF
+ SBD_DEVICE="{{device}}"
+ SBD_WATCHDOG="yes"
+ SBD_WATCHDOG_DEV="{{watchdog}}"
+ EOF
+
+ - shortdesc: Enable SBD Daemon
+ service:
+ - sbd: start
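
Once the daemon is enabled on all nodes, the device can be exercised end to end. A hedged verification sketch in which the device path and the node name are placeholders:

    # show the on-disk SBD header and the allocated message slots
    sbd -d /dev/disk/by-id/... dump
    sbd -d /dev/disk/by-id/... list
    # write a harmless test message to a peer; it should appear in that node's system log
    sbd -d /dev/disk/by-id/... message node2 test
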
diff --git a/scripts/sbd/main.yml b/scripts/sbd/main.yml
new file mode 100644
index 0000000..f86ef22
--- /dev/null
+++ b/scripts/sbd/main.yml
@@ -0,0 +1,37 @@
+# Copyright (C) 2009 Dejan Muhamedagic
+# Copyright (C) 2015 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Stonith
+shortdesc: "SBD, Shared storage based fencing"
+longdesc: |
+ Create an SBD STONITH resource. SBD must be configured to use
+ a particular shared storage device via /etc/sysconfig/sbd.
+
+ This wizard can optionally create and configure an SBD device.
+ A shared device must be available and visible on all nodes.
+
+ For more information, see http://www.linux-ha.org/wiki/SBD_Fencing
+ or the sbd(8) manual page.
+
+parameters:
+ - name: id
+ shortdesc: Resource ID (Name)
+ value: sbd-fencing
+ example: sbd-fencing
+ required: true
+ type: resource
+
+include:
+ - script: sbd-device
+ required: false
+
+actions:
+ - include: sbd-device
+
+ - cib: |
+ primitive {{id}} stonith:external/sbd
+ pcmk_delay_max=30s
+
+ property stonith-enabled=true
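
After committing the STONITH resource it is worth confirming that fencing actually works before relying on it. A cautious test sketch; fencing reboots the target, so pick a node that can safely go down (the node name is a placeholder):

    # confirm the fencing resource has started somewhere in the cluster
    crm_mon -1 | grep sbd-fencing
    # ask the cluster to fence a test node
    crm node fence node2
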
diff --git a/scripts/virtual-ip/main.yml b/scripts/virtual-ip/main.yml
new file mode 100644
index 0000000..1ccb19e
--- /dev/null
+++ b/scripts/virtual-ip/main.yml
@@ -0,0 +1,24 @@
+version: 2.2
+shortdesc: Virtual IP
+category: Basic
+include:
+ - agent: ocf:heartbeat:IPaddr2
+ name: virtual-ip
+ parameters:
+ - name: id
+ type: resource
+ required: true
+ - name: ip
+ type: ip_address
+ required: true
+ - name: cidr_netmask
+ type: integer
+ required: false
+ - name: broadcast
+ type: string
+ required: false
+ ops: |
+ op start timeout="20" op stop timeout="20"
+ op monitor interval="10" timeout="20"
+actions:
+ - include: virtual-ip
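
With illustrative values, the included agent step renders to roughly the following primitive; the ID, address and netmask are supplied by the user:

    crm configure primitive vip-1 ocf:heartbeat:IPaddr2 \
        params ip="192.0.2.10" cidr_netmask="24" \
        op start timeout="20" op stop timeout="20" \
        op monitor interval="10" timeout="20"
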
diff --git a/scripts/vmware/main.yml b/scripts/vmware/main.yml
new file mode 100644
index 0000000..0fd68d4
--- /dev/null
+++ b/scripts/vmware/main.yml
@@ -0,0 +1,60 @@
+# Copyright (C) 2016 Kristoffer Gronlund
+#
+# License: GNU General Public License (GPL)
+version: 2.2
+category: Stonith
+shortdesc: Fencing using vCenter / ESX Server
+longdesc: |
+ Note that SBD is the recommended fencing mechanism for VMware
+ hosts! Please refer to the documentation for more details on
+ recommended fencing configurations.
+
+ Fencing for VMware virtualized hosts using ESX Server or vCenter.
+
+ This wizard configures a fencing resource for a single node.
+ Run the wizard once for each node that should be fenced.
+
+ Prerequisites:
+
+ 1. Install the vSphere Web Services SDK on all nodes.
+
+ 2. Generate vCenter credentials using credstore_admin.pl (see the sketch at the end of this wizard).
+
+ 3. Copy credentials to the same location on all nodes.
+
+parameters:
+ - name: id
+ type: resource
+ shortdesc: Base Resource ID
+ value: vcenter-fencing
+ required: true
+ - name: node_name
+ type: string
+ shortdesc: Name of node to fence
+ required: true
+ - name: machine_name
+ type: string
+ shortdesc: Name of machine in vCenter inventory
+ required: true
+ - name: server
+ type: string
+ shortdesc: vCenter server URL
+ required: true
+ example: vcenter.example.com
+ - name: credstore
+ type: string
+ shortdesc: Credentials file name
+ required: true
+
+actions:
+ - cib: |
+ primitive {{id}}-{{node_name}} stonith:external/vcenter
+ VI_SERVER="{{server}}"
+ VI_CREDSTORE="{{credstore}}"
+ HOSTLIST="{{node_name}}={{machine_name}}"
+ RESETPOWERON="0"
+ pcmk_host_check="static-list"
+ pcmk_host_list="{{node_name}}"
+ op monitor interval="60s"
+ location loc-{{id}}-{{node_name}} {{id}}-{{node_name}} -inf: {{node_name}}
+ property stonith-enabled=true
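
The credential store mentioned in prerequisite 2 is created with the credstore_admin.pl tool shipped with the vSphere Perl SDK / vCLI. The path and option names below are an approximation and vary between SDK releases; the server, user and password are placeholders:

    # store vCenter credentials for the fencing agent (the file is referenced via VI_CREDSTORE)
    /usr/lib/vmware-vcli/apps/general/credstore_admin.pl add \
        -s vcenter.example.com -u fence-user -p 'secret'
    # copy the resulting credential store file to the same path on every cluster node
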