#!/usr/bin/env python

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import pexpect
import sys

from ansible.module_utils.six import PY2

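# pexpect logs the child's output as bytes, so on Python 3 the log target
# must be the underlying binary buffer rather than the text-mode sys.stdout.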
if PY2:
    log_buffer = sys.stdout
else:
    log_buffer = sys.stdout.buffer

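# Environment overrides applied to every ansible-playbook run, keeping the
# output deterministic: uncolored text and no .retry files left behind.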
env_vars = {
    'ANSIBLE_ROLES_PATH': './roles',
    'ANSIBLE_NOCOLOR': 'True',
    'ANSIBLE_RETRY_FILES_ENABLED': 'False',
}


def run_test(playbook, test_spec, args=None, timeout=10, env=None):
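    """Spawn ansible-playbook under pexpect and drive its interactive prompts.

    playbook  -- playbook filename to pass to ansible-playbook
    test_spec -- [list of (expect_regexp, response) tuples, final expect string]
    args      -- extra command line arguments (defaults to this script's argv)
    timeout   -- pexpect timeout, in seconds, for each expect call
    env       -- base environment (defaults to a copy of os.environ)
    """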

    if not env:
        env = os.environ.copy()
    env.update(env_vars)

    if not args:
        args = sys.argv[1:]

    vars_prompt_test = pexpect.spawn(
        'ansible-playbook',
        args=[playbook] + args,
        timeout=timeout,
        env=env,
    )

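    # Echo the full session (output and sent input) to stdout so a failing
    # test leaves a readable transcript.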
    vars_prompt_test.logfile = log_buffer
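    # Walk the expect/send pairs in order, then check for the final expected
    # string and wait for the child process to exit at EOF.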
    for item in test_spec[0]:
        vars_prompt_test.expect(item[0])
        if item[1]:
            vars_prompt_test.send(item[1])
    vars_prompt_test.expect(test_spec[1])
    vars_prompt_test.expect(pexpect.EOF)
    vars_prompt_test.close()


# These are the tests to run. Each test is a playbook and a test_spec.
#
# The test_spec is a list with two elements.
#
# The first element is a list of two-element tuples: the regexp to expect
# in the output, and the response line to send.
#
# The second element is the final string of text to expect in the output.
#
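# For example, a hypothetical entry (example.yml is illustrative, not a real
# playbook in this target) that answers a single prompt and then checks the
# recorded variable would look like:
#
#     {'playbook': 'example.yml',
#      'test_spec': [
#          [('name:', 'value\r')],
#          '"name": "value"']}
#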
tests = [
    # Basic vars_prompt
    {'playbook': 'vars_prompt-1.yml',
     'test_spec': [
         [('input:', 'some input\r')],
         '"input": "some input"']},

    # Custom prompt
    {'playbook': 'vars_prompt-2.yml',
     'test_spec': [
         [('Enter some input:', 'some more input\r')],
         '"input": "some more input"']},

    # Test confirm, both correct and incorrect
    {'playbook': 'vars_prompt-3.yml',
     'test_spec': [
         [('input:', 'confirm me\r'),
          ('confirm input:', 'confirm me\r')],
         '"input": "confirm me"']},

    {'playbook': 'vars_prompt-3.yml',
     'test_spec': [
         [('input:', 'confirm me\r'),
          ('confirm input:', 'incorrect\r'),
          (r'\*\*\*\*\* VALUES ENTERED DO NOT MATCH \*\*\*\*', ''),
          ('input:', 'confirm me\r'),
          ('confirm input:', 'confirm me\r')],
         '"input": "confirm me"']},

    # Test private
    {'playbook': 'vars_prompt-4.yml',
     'test_spec': [
         [('not_secret', 'this is displayed\r'),
          ('this is displayed', '')],
         '"not_secret": "this is displayed"']},

    # Test hashing
    {'playbook': 'vars_prompt-5.yml',
     'test_spec': [
         [('password', 'Scenic-Improving-Payphone\r'),
          ('confirm password', 'Scenic-Improving-Payphone\r')],
         r'"password": "\$6\$']},

    # Test variables in prompt field
    # https://github.com/ansible/ansible/issues/32723
    {'playbook': 'vars_prompt-6.yml',
     'test_spec': [
         [('prompt from variable:', 'input\r')],
         '']},

    # Test play vars coming from vars_prompt
    # https://github.com/ansible/ansible/issues/37984
    {'playbook': 'vars_prompt-7.yml',
     'test_spec': [
         [('prompting for host:', 'testhost\r')],
         r'testhost.*ok=1']},

    # Test play unsafe toggle
    {'playbook': 'unsafe.yml',
     'test_spec': [
         [('prompting for variable:', '{{whole}}\r')],
         r'testhost.*ok=2']},

    # Test unsupported keys
    {'playbook': 'unsupported.yml',
     'test_spec': [
         [],
         "Invalid vars_prompt data structure, found unsupported key 'when'"]},
]

for t in tests:
    run_test(playbook=t['playbook'], test_spec=t['test_spec'])