# test/features/qdevice_validate.feature
@qdevice
Feature: corosync qdevice/qnetd options validation

  Tag @clean means the cluster service needs to be stopped if it is running
  Need nodes: hanode1 hanode2 hanode3 qnetd-node node-without-ssh
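  # For reference (not asserted by any scenario below), the qdevice options
  # validated here correspond to the quorum device section of corosync.conf,
  # roughly as follows (illustrative values, see corosync-qdevice(8);
  # the exec_check command is a hypothetical example):
  #   quorum {
  #     provider: corosync_votequorum
  #     device {
  #       model: net
  #       net {
  #         host: qnetd-node
  #         port: 5403
  #         tie_breaker: lowest
  #       }
  #       heuristics {
  #         mode: on
  #         exec_check: /usr/bin/test -f /tmp/check_flag
  #       }
  #     }
  #   }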

  @clean
  Scenario: Option "--qnetd-hostname" use the same node
    When    Try "crm cluster init --qnetd-hostname=hanode1"
    Then    Except "ERROR: cluster.init: host for qnetd must be a remote one"

  @clean
  Scenario: Option "--qnetd-hostname" use hanode1's IP
    When    Try "crm cluster init --qnetd-hostname=@hanode1.ip.0"
    Then    Except "ERROR: cluster.init: host for qnetd must be a remote one"

  @clean
  Scenario: Option "--qnetd-hostname" use unknown hostname
    When    Try "crm cluster init --qnetd-hostname=error-node"
    Then    Except "ERROR: cluster.init: host "error-node" is unreachable"

  @clean
  Scenario: The ssh service on the qnetd node is not available
    When    Run "systemctl stop sshd.service" on "node-without-ssh"
    When    Try "crm cluster init --qnetd-hostname=node-without-ssh"
    Then    Except "ERROR: cluster.init: ssh service on "node-without-ssh" not available"

  @clean
  Scenario: Option "--qdevice-port" set wrong port
    When    Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-port=1"
    Then    Except "ERROR: cluster.init: invalid qdevice port range(1024 - 65535)"

  @clean
  Scenario: Option "--qdevice-tie-breaker" set wrong value
    When    Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-tie-breaker=wrongtiebreaker"
    Then    Except "ERROR: cluster.init: invalid qdevice tie_breaker(lowest/highest/valid_node_id)"

  @clean
  Scenario: Option "--qdevice-heuristics" set wrong value
    When    Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics='ls /opt'"
    Then    Except "ERROR: cluster.init: commands for heuristics should be absolute path"
    When    Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics='/bin/not_exist_cmd /opt'"
    Then    Except "ERROR: cluster.init: command /bin/not_exist_cmd not exist"

  @clean
  Scenario: Option "--qnetd-hostname" is required by other qdevice options
    When    Try "crm cluster init --qdevice-port=1234"
    Then    Except multiple lines
      """
      usage: init [options] [STAGE]
      crm: error: Option --qnetd-hostname is required if want to configure qdevice
      """

  @clean
  Scenario: Option --qdevice-heuristics is required when configuring heuristics mode
    When    Try "crm cluster init --qnetd-hostname=qnetd-node --qdevice-heuristics-mode="on""
    Then    Except multiple lines
      """
      usage: init [options] [STAGE]
      crm: error: Option --qdevice-heuristics is required if want to configure heuristics mode
      """

  @clean
  Scenario: The qnetd node does not have corosync-qnetd installed
    Given   Cluster service is "stopped" on "hanode2"
    When    Try "crm cluster init --qnetd-hostname=hanode2 -y"
    Then    Except multiple lines
      """
      ERROR: cluster.init: Package "corosync-qnetd" not installed on hanode2!
      Cluster service already successfully started on this node except qdevice service.
      If you still want to use qdevice, install "corosync-qnetd" on hanode2.
      Then run command "crm cluster init" with "qdevice" stage, like:
        crm cluster init qdevice qdevice_related_options
      That command will setup qdevice separately.
      """
    And     Cluster service is "started" on "hanode1"
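
  # The recovery path suggested by the error is to install the package on
  # the qnetd host and then run the qdevice stage on its own; on SUSE that
  # would look roughly like (assumed package manager):
  #   zypper install corosync-qnetd   # on hanode2
  #   crm cluster init qdevice --qnetd-hostname=hanode2 -y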

  @clean
  Scenario: Raise an error when running the qdevice stage with a duplicated cluster name
    Given   Cluster service is "stopped" on "hanode2"
    Given   Cluster service is "stopped" on "hanode3"
    When    Run "crm cluster init -n cluster1 -y" on "hanode2"
    Then    Cluster service is "started" on "hanode2"
    When    Run "crm cluster init -n cluster1 -y" on "hanode3"
    Then    Cluster service is "started" on "hanode3"
    When    Try "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode2,hanode3"
    Then    Except "ERROR: cluster.init: Duplicated cluster name "cluster1"!"
    When    Run "crm cluster stop" on "hanode2"
    When    Run "crm cluster stop" on "hanode3"

  @clean
  Scenario: Raise an error when the same cluster name already exists on the qnetd server
    Given   Cluster service is "stopped" on "hanode1"
    Given   Cluster service is "stopped" on "hanode2"
    When    Try "crm cluster init -n cluster1 --qnetd-hostname=qnetd-node -y" on "hanode2"
    When    Try "crm cluster init -n cluster1 --qnetd-hostname=qnetd-node -y"
    Then    Except multiple lines
      """
      ERROR: cluster.init: This cluster's name "cluster1" already exists on qnetd server!
      Cluster service already successfully started on this node except qdevice service.
      If you still want to use qdevice, consider to use the different cluster-name property.
      Then run command "crm cluster init" with "qdevice" stage, like:
        crm cluster init qdevice qdevice_related_options
      That command will setup qdevice separately.
      """
    And     Cluster service is "started" on "hanode1"
    And     Cluster service is "started" on "hanode2"

  @clean
  Scenario: Run the qdevice stage on an inactive cluster node
    Given   Cluster service is "stopped" on "hanode1"
    When    Try "crm cluster init qdevice --qnetd-hostname=qnetd-node"
    Then    Except "ERROR: cluster.init: Cluster is inactive - can't run qdevice stage"

  @clean
  Scenario: Run the qdevice stage without the "--qnetd-hostname" option
    Given   Cluster service is "stopped" on "hanode1"
    When    Run "crm cluster init -y" on "hanode1"
    Then    Cluster service is "started" on "hanode1"
    When    Try "crm cluster init qdevice -y"
    Then    Except multiple lines
      """
      usage: init [options] [STAGE]
      crm: error: Option --qnetd-hostname is required if want to configure qdevice
      """

  @clean
  Scenario: Set up qdevice on a single-node cluster with an RA running (bsc#1181415)
    When    Run "crm cluster init -y" on "hanode1"
    Then    Cluster service is "started" on "hanode1"
    And     Service "corosync-qdevice" is "stopped" on "hanode1"
    When    Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
    When    Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
    Then    Expected "WARNING: To use qdevice service, need to restart cluster service manually on each node" in stderr
    And     Service "corosync-qdevice" is "stopped" on "hanode1"
    When    Run "crm cluster restart" on "hanode1"
    Then    Service "corosync-qdevice" is "started" on "hanode1"

  @clean
  Scenario: Remove qdevice from a single-node cluster (bsc#1181415)
    When    Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
    Then    Cluster service is "started" on "hanode1"
    And     Service "corosync-qdevice" is "started" on "hanode1"
    When    Run "crm cluster remove --qdevice -y" on "hanode1"
    Then    Expected "Restarting cluster service" in stdout
    Then    Cluster service is "started" on "hanode1"
    And     Service "corosync-qdevice" is "stopped" on "hanode1"

  @clean
  Scenario: Remove qdevice from a single-node cluster which has an RA running (bsc#1181415)
    When    Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
    Then    Cluster service is "started" on "hanode1"
    And     Service "corosync-qdevice" is "started" on "hanode1"
    When    Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
    When    Run "crm cluster remove --qdevice -y" on "hanode1"
    Then    Expected "WARNING: To remove qdevice service, need to restart cluster service manually on each node" in stderr
    Then    Cluster service is "started" on "hanode1"
    And     Service "corosync-qdevice" is "started" on "hanode1"
    When    Run "crm cluster restart" on "hanode1"
    Then    Cluster service is "started" on "hanode1"
    And     Service "corosync-qdevice" is "stopped" on "hanode1"