author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 16:04:21 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 16:04:21 +0000
commit | 8a754e0858d922e955e71b253c139e071ecec432 (patch) |
tree | 527d16e74bfd1840c85efd675fdecad056c54107 /lib/ansible/plugins/connection |
parent | Initial commit. (diff) |
download | ansible-core-8a754e0858d922e955e71b253c139e071ecec432.tar.xz, ansible-core-8a754e0858d922e955e71b253c139e071ecec432.zip |
Adding upstream version 2.14.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib/ansible/plugins/connection')
-rw-r--r-- | lib/ansible/plugins/connection/__init__.py | 382
-rw-r--r-- | lib/ansible/plugins/connection/local.py | 194
-rw-r--r-- | lib/ansible/plugins/connection/paramiko_ssh.py | 695
-rw-r--r-- | lib/ansible/plugins/connection/psrp.py | 898
-rw-r--r-- | lib/ansible/plugins/connection/ssh.py | 1399
-rw-r--r-- | lib/ansible/plugins/connection/winrm.py | 755
6 files changed, 4323 insertions, 0 deletions
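All six files in the diff below are connection plugins that ansible-core resolves by name at runtime. As a point of reference (not part of this commit), here is a minimal sketch of how such a plugin is obtained through the plugin loader, mirroring the `connection_loader.get('local', play_context, '/dev/null')` call that appears in `NetworkConnectionBase` further down; it assumes ansible-core 2.14 is importable on a POSIX controller:

```python
# Sketch only, not part of the diff: load and use one of the connection
# plugins added below via the plugin loader, the same way __init__.py
# does for its helper 'local' connection.
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader

play_context = PlayContext()
# '/dev/null' stands in for new_stdin, as in the diff below
conn = connection_loader.get('local', play_context, '/dev/null')
conn.set_options()
rc, stdout, stderr = conn.exec_command('echo hello')  # returns (int, bytes, bytes)
print(rc, stdout)
```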
diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py new file mode 100644 index 0000000..daa683c --- /dev/null +++ b/lib/ansible/plugins/connection/__init__.py @@ -0,0 +1,382 @@ +# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com> +# (c) 2017, Peter Sprygada <psprygad@redhat.com> +# (c) 2017 Ansible Project +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import fcntl +import os +import shlex +import typing as t + +from abc import abstractmethod +from functools import wraps + +from ansible import constants as C +from ansible.module_utils._text import to_bytes, to_text +from ansible.plugins import AnsiblePlugin +from ansible.utils.display import Display +from ansible.plugins.loader import connection_loader, get_shell_plugin +from ansible.utils.path import unfrackpath + +display = Display() + + +__all__ = ['ConnectionBase', 'ensure_connect'] + +BUFSIZE = 65536 + + +def ensure_connect(func): + @wraps(func) + def wrapped(self, *args, **kwargs): + if not self._connected: + self._connect() + return func(self, *args, **kwargs) + return wrapped + + +class ConnectionBase(AnsiblePlugin): + ''' + A base class for connections to contain common code. + ''' + + has_pipelining = False + has_native_async = False # eg, winrm + always_pipeline_modules = False # eg, winrm + has_tty = True # for interacting with become plugins + # When running over this connection type, prefer modules written in a certain language + # as discovered by the specified file extension. An empty string as the + # language means any language. + module_implementation_preferences = ('',) # type: t.Iterable[str] + allow_executable = True + + # the following control whether or not the connection supports the + # persistent connection framework or not + supports_persistence = False + force_persistence = False + + default_user = None + + def __init__(self, play_context, new_stdin, shell=None, *args, **kwargs): + + super(ConnectionBase, self).__init__() + + # All these hasattrs allow subclasses to override these parameters + if not hasattr(self, '_play_context'): + # Backwards compat: self._play_context isn't really needed, using set_options/get_option + self._play_context = play_context + if not hasattr(self, '_new_stdin'): + self._new_stdin = new_stdin + if not hasattr(self, '_display'): + # Backwards compat: self._display isn't really needed, just import the global display and use that. 
+ self._display = display + if not hasattr(self, '_connected'): + self._connected = False + + self.success_key = None + self.prompt = None + self._connected = False + self._socket_path = None + + # helper plugins + self._shell = shell + + # we always must have shell + if not self._shell: + shell_type = play_context.shell if play_context.shell else getattr(self, '_shell_type', None) + self._shell = get_shell_plugin(shell_type=shell_type, executable=self._play_context.executable) + + self.become = None + + def set_become_plugin(self, plugin): + self.become = plugin + + @property + def connected(self): + '''Read-only property holding whether the connection to the remote host is active or closed.''' + return self._connected + + @property + def socket_path(self): + '''Read-only property holding the connection socket path for this remote host''' + return self._socket_path + + @staticmethod + def _split_ssh_args(argstring): + """ + Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a + list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to + the argument list. The list will not contain any empty elements. + """ + # In Python3, shlex.split doesn't work on a byte string. + return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()] + + @property + @abstractmethod + def transport(self): + """String used to identify this Connection class from other classes""" + pass + + @abstractmethod + def _connect(self): + """Connect to the host we've been initialized with""" + + @ensure_connect + @abstractmethod + def exec_command(self, cmd, in_data=None, sudoable=True): + """Run a command on the remote host. + + :arg cmd: byte string containing the command + :kwarg in_data: If set, this data is passed to the command's stdin. + This is used to implement pipelining. Currently not all + connection plugins implement pipelining. + :kwarg sudoable: Tell the connection plugin if we're executing + a command via a privilege escalation mechanism. This may affect + how the connection plugin returns data. Note that not all + connections can handle privilege escalation. + :returns: a tuple of (return code, stdout, stderr) The return code is + an int while stdout and stderr are both byte strings. + + When a command is executed, it goes through multiple commands to get + there. It looks approximately like this:: + + [LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command + :LocalShell: Is optional. It is run locally to invoke the + ``Connection Command``. In most instances, the + ``ConnectionCommand`` can be invoked directly instead. The ssh + connection plugin which can have values that need expanding + locally specified via ssh_args is the sole known exception to + this. Shell metacharacters in the command itself should be + processed on the remote machine, not on the local machine so no + shell is needed on the local machine. (Example, ``/bin/sh``) + :ConnectionCommand: This is the command that connects us to the remote + machine to run the rest of the command. ``ansible_user``, + ``ansible_ssh_host`` and so forth are fed to this piece of the + command to connect to the correct host (Examples ``ssh``, + ``chroot``) + :UsersLoginShell: This shell may or may not be created depending on + the ConnectionCommand used by the connection plugin. This is the + shell that the ``ansible_user`` has configured as their login + shell. 
In traditional UNIX parlance, this is the last field of + a user's ``/etc/passwd`` entry We do not specifically try to run + the ``UsersLoginShell`` when we connect. Instead it is implicit + in the actions that the ``ConnectionCommand`` takes when it + connects to a remote machine. ``ansible_shell_type`` may be set + to inform ansible of differences in how the ``UsersLoginShell`` + handles things like quoting if a shell has different semantics + than the Bourne shell. + :ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var + ``ansible_shell_executable`` or via + ``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set. + We explicitly invoke this shell so that we have predictable + quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only + settable by the user because some sudo setups may only allow + invoking a specific shell. (For instance, ``/bin/bash`` may be + allowed but ``/bin/sh``, our default, may not). We invoke this + twice, once after the ``ConnectionCommand`` and once after the + ``BecomeCommand``. After the ConnectionCommand, this is run by + the ``UsersLoginShell``. After the ``BecomeCommand`` we specify + that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly. + :BecomeComand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs + privilege escalation. Setting this up is performed by the action + plugin prior to running ``exec_command``. So we just get passed + :param:`cmd` which has the BecomeCommand already added. + (Examples: sudo, su) If we have a BecomeCommand then we will + invoke a ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we + have a consistent view of quoting. + :Command: Is the command we're actually trying to run remotely. + (Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file) + """ + pass + + @ensure_connect + @abstractmethod + def put_file(self, in_path, out_path): + """Transfer a file from local to remote""" + pass + + @ensure_connect + @abstractmethod + def fetch_file(self, in_path, out_path): + """Fetch a file from remote to local; callers are expected to have pre-created the directory chain for out_path""" + pass + + @abstractmethod + def close(self): + """Terminate the connection""" + pass + + def connection_lock(self): + f = self._play_context.connection_lockfd + display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr) + fcntl.lockf(f, fcntl.LOCK_EX) + display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr) + + def connection_unlock(self): + f = self._play_context.connection_lockfd + fcntl.lockf(f, fcntl.LOCK_UN) + display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr) + + def reset(self): + display.warning("Reset is not implemented for this connection") + + def update_vars(self, variables): + ''' + Adds 'magic' variables relating to connections to the variable dictionary provided. + In case users need to access from the play, this is a legacy from runner. + ''' + for varname in C.COMMON_CONNECTION_VARS: + value = None + if varname in variables: + # dont update existing + continue + elif 'password' in varname or 'passwd' in varname: + # no secrets! + continue + elif varname == 'ansible_connection': + # its me mom! + value = self._load_name + elif varname == 'ansible_shell_type': + # its my cousin ... 
+ value = self._shell._load_name + else: + # deal with generic options if the plugin supports em (for exmaple not all connections have a remote user) + options = C.config.get_plugin_options_from_var('connection', self._load_name, varname) + if options: + value = self.get_option(options[0]) # for these variables there should be only one option + elif 'become' not in varname: + # fallback to play_context, unles becoem related TODO: in the end should come from task/play and not pc + for prop, var_list in C.MAGIC_VARIABLE_MAPPING.items(): + if varname in var_list: + try: + value = getattr(self._play_context, prop) + break + except AttributeError: + # it was not defined, fine to ignore + continue + + if value is not None: + display.debug('Set connection var {0} to {1}'.format(varname, value)) + variables[varname] = value + + +class NetworkConnectionBase(ConnectionBase): + """ + A base class for network-style connections. + """ + + force_persistence = True + # Do not use _remote_is_local in other connections + _remote_is_local = True + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(NetworkConnectionBase, self).__init__(play_context, new_stdin, *args, **kwargs) + self._messages = [] + self._conn_closed = False + + self._network_os = self._play_context.network_os + + self._local = connection_loader.get('local', play_context, '/dev/null') + self._local.set_options() + + self._sub_plugin = {} + self._cached_variables = (None, None, None) + + # reconstruct the socket_path and set instance values accordingly + self._ansible_playbook_pid = kwargs.get('ansible_playbook_pid') + self._update_connection_state() + + def __getattr__(self, name): + try: + return self.__dict__[name] + except KeyError: + if not name.startswith('_'): + plugin = self._sub_plugin.get('obj') + if plugin: + method = getattr(plugin, name, None) + if method is not None: + return method + raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name)) + + def exec_command(self, cmd, in_data=None, sudoable=True): + return self._local.exec_command(cmd, in_data, sudoable) + + def queue_message(self, level, message): + """ + Adds a message to the queue of messages waiting to be pushed back to the controller process. + + :arg level: A string which can either be the name of a method in display, or 'log'. 
When + the messages are returned to task_executor, a value of log will correspond to + ``display.display(message, log_only=True)``, while another value will call ``display.[level](message)`` + """ + self._messages.append((level, message)) + + def pop_messages(self): + messages, self._messages = self._messages, [] + return messages + + def put_file(self, in_path, out_path): + """Transfer a file from local to remote""" + return self._local.put_file(in_path, out_path) + + def fetch_file(self, in_path, out_path): + """Fetch a file from remote to local""" + return self._local.fetch_file(in_path, out_path) + + def reset(self): + ''' + Reset the connection + ''' + if self._socket_path: + self.queue_message('vvvv', 'resetting persistent connection for socket_path %s' % self._socket_path) + self.close() + self.queue_message('vvvv', 'reset call on connection instance') + + def close(self): + self._conn_closed = True + if self._connected: + self._connected = False + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(NetworkConnectionBase, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + if self.get_option('persistent_log_messages'): + warning = "Persistent connection logging is enabled for %s. This will log ALL interactions" % self._play_context.remote_addr + logpath = getattr(C, 'DEFAULT_LOG_PATH') + if logpath is not None: + warning += " to %s" % logpath + self.queue_message('warning', "%s and WILL NOT redact sensitive configuration like passwords. USE WITH CAUTION!" % warning) + + if self._sub_plugin.get('obj') and self._sub_plugin.get('type') != 'external': + try: + self._sub_plugin['obj'].set_options(task_keys=task_keys, var_options=var_options, direct=direct) + except AttributeError: + pass + + def _update_connection_state(self): + ''' + Reconstruct the connection socket_path and check if it exists + + If the socket path exists then the connection is active and set + both the _socket_path value to the path and the _connected value + to True. If the socket path doesn't exist, leave the socket path + value to None and the _connected value to False + ''' + ssh = connection_loader.get('ssh', class_only=True) + control_path = ssh._create_control_path( + self._play_context.remote_addr, self._play_context.port, + self._play_context.remote_user, self._play_context.connection, + self._ansible_playbook_pid + ) + + tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR) + socket_path = unfrackpath(control_path % dict(directory=tmp_path)) + + if os.path.exists(socket_path): + self._connected = True + self._socket_path = socket_path + + def _log_messages(self, message): + if self.get_option('persistent_log_messages'): + self.queue_message('log', message) diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py new file mode 100644 index 0000000..27afd10 --- /dev/null +++ b/lib/ansible/plugins/connection/local.py @@ -0,0 +1,194 @@ +# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> +# (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: local + short_description: execute on controller + description: + - This connection plugin allows ansible to execute tasks on the Ansible 'controller' instead of on a remote host. 
+ author: ansible (@core) + version_added: historical + extends_documentation_fragment: + - connection_pipelining + notes: + - The remote user is ignored, the user with which the ansible CLI was executed is used instead. +''' + +import fcntl +import getpass +import os +import pty +import shutil +import subprocess + +import ansible.constants as C +from ansible.errors import AnsibleError, AnsibleFileNotFound +from ansible.module_utils.compat import selectors +from ansible.module_utils.six import text_type, binary_type +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.plugins.connection import ConnectionBase +from ansible.utils.display import Display +from ansible.utils.path import unfrackpath + +display = Display() + + +class Connection(ConnectionBase): + ''' Local based connections ''' + + transport = 'local' + has_pipelining = True + + def __init__(self, *args, **kwargs): + + super(Connection, self).__init__(*args, **kwargs) + self.cwd = None + try: + self.default_user = getpass.getuser() + except KeyError: + display.vv("Current user (uid=%s) does not seem to exist on this system, leaving user empty." % os.getuid()) + self.default_user = "" + + def _connect(self): + ''' connect to the local host; nothing to do here ''' + + # Because we haven't made any remote connection we're running as + # the local user, rather than as whatever is configured in remote_user. + self._play_context.remote_user = self.default_user + + if not self._connected: + display.vvv(u"ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr) + self._connected = True + return self + + def exec_command(self, cmd, in_data=None, sudoable=True): + ''' run a command on the local host ''' + + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + display.debug("in local.exec_command()") + + executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None + + if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')): + raise AnsibleError("failed to find the executable specified %s." + " Please verify if the executable exists and re-try." % executable) + + display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr) + display.debug("opening command with Popen()") + + if isinstance(cmd, (text_type, binary_type)): + cmd = to_bytes(cmd) + else: + cmd = map(to_bytes, cmd) + + master = None + stdin = subprocess.PIPE + if sudoable and self.become and self.become.expect_prompt() and not self.get_option('pipelining'): + # Create a pty if sudoable for privlege escalation that needs it. + # Falls back to using a standard pipe if this fails, which may + # cause the command to fail in certain situations where we are escalating + # privileges or the command otherwise needs a pty. 
+ try: + master, stdin = pty.openpty() + except (IOError, OSError) as e: + display.debug("Unable to open pty: %s" % to_native(e)) + + p = subprocess.Popen( + cmd, + shell=isinstance(cmd, (text_type, binary_type)), + executable=executable, + cwd=self.cwd, + stdin=stdin, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + # if we created a master, we can close the other half of the pty now, otherwise master is stdin + if master is not None: + os.close(stdin) + + display.debug("done running command with Popen()") + + if self.become and self.become.expect_prompt() and sudoable: + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) + selector = selectors.DefaultSelector() + selector.register(p.stdout, selectors.EVENT_READ) + selector.register(p.stderr, selectors.EVENT_READ) + + become_output = b'' + try: + while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output): + events = selector.select(self._play_context.timeout) + if not events: + stdout, stderr = p.communicate() + raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output)) + + for key, event in events: + if key.fileobj == p.stdout: + chunk = p.stdout.read() + elif key.fileobj == p.stderr: + chunk = p.stderr.read() + + if not chunk: + stdout, stderr = p.communicate() + raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output)) + become_output += chunk + finally: + selector.close() + + if not self.become.check_success(become_output): + become_pass = self.become.get_option('become_pass', playcontext=self._play_context) + if master is None: + p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n') + else: + os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n') + + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) + + display.debug("getting output with communicate()") + stdout, stderr = p.communicate(in_data) + display.debug("done communicating") + + # finally, close the other half of the pty, if it was created + if master: + os.close(master) + + display.debug("done with local.exec_command()") + return (p.returncode, stdout, stderr) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to local ''' + + super(Connection, self).put_file(in_path, out_path) + + in_path = unfrackpath(in_path, basedir=self.cwd) + out_path = unfrackpath(out_path, basedir=self.cwd) + + display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr) + if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path))) + try: + shutil.copyfile(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict')) + except shutil.Error: + raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_native(in_path), to_native(out_path))) + except IOError as e: + raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e))) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from local to local -- for compatibility ''' + + super(Connection, self).fetch_file(in_path, out_path) + 
+ display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr) + self.put_file(in_path, out_path) + + def close(self): + ''' terminate the connection; nothing to do here ''' + self._connected = False diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py new file mode 100644 index 0000000..b9fd898 --- /dev/null +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -0,0 +1,695 @@ +# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ + author: Ansible Core Team + name: paramiko + short_description: Run tasks via python ssh (paramiko) + description: + - Use the python ssh implementation (Paramiko) to connect to targets + - The paramiko transport is provided because many distributions, in particular EL6 and before do not support ControlPersist + in their SSH implementations. + - This is needed on the Ansible control machine to be reasonably efficient with connections. + Thus paramiko is faster for most users on these platforms. + Users with ControlPersist capability can consider using -c ssh or configuring the transport in the configuration file. + - This plugin also borrows a lot of settings from the ssh plugin as they both cover the same protocol. + version_added: "0.1" + options: + remote_addr: + description: + - Address of the remote target + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_ssh_host + - name: ansible_paramiko_host + remote_user: + description: + - User to login/authenticate as + - Can be set from the CLI via the C(--user) or C(-u) options. + vars: + - name: ansible_user + - name: ansible_ssh_user + - name: ansible_paramiko_user + env: + - name: ANSIBLE_REMOTE_USER + - name: ANSIBLE_PARAMIKO_REMOTE_USER + version_added: '2.5' + ini: + - section: defaults + key: remote_user + - section: paramiko_connection + key: remote_user + version_added: '2.5' + keyword: + - name: remote_user + password: + description: + - Secret used to either login the ssh server or as a passphrase for ssh keys that require it + - Can be set from the CLI via the C(--ask-pass) option. 
+ vars: + - name: ansible_password + - name: ansible_ssh_pass + - name: ansible_ssh_password + - name: ansible_paramiko_pass + - name: ansible_paramiko_password + version_added: '2.5' + use_rsa_sha2_algorithms: + description: + - Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys + - On paramiko versions older than 2.9, this only affects hostkeys + - For behavior matching paramiko<2.9 set this to C(False) + vars: + - name: ansible_paramiko_use_rsa_sha2_algorithms + ini: + - {key: use_rsa_sha2_algorithms, section: paramiko_connection} + env: + - {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS} + default: True + type: boolean + version_added: '2.14' + host_key_auto_add: + description: 'Automatically add host keys' + env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}] + ini: + - {key: host_key_auto_add, section: paramiko_connection} + type: boolean + look_for_keys: + default: True + description: 'False to disable searching for private key files in ~/.ssh/' + env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}] + ini: + - {key: look_for_keys, section: paramiko_connection} + type: boolean + proxy_command: + default: '' + description: + - Proxy information for running the connection via a jumphost + - Also this plugin will scan 'ssh_args', 'ssh_extra_args' and 'ssh_common_args' from the 'ssh' plugin settings for proxy information if set. + env: [{name: ANSIBLE_PARAMIKO_PROXY_COMMAND}] + ini: + - {key: proxy_command, section: paramiko_connection} + ssh_args: + description: Only used in parsing ProxyCommand for use in this plugin. + default: '' + ini: + - section: 'ssh_connection' + key: 'ssh_args' + env: + - name: ANSIBLE_SSH_ARGS + vars: + - name: ansible_ssh_args + version_added: '2.7' + ssh_common_args: + description: Only used in parsing ProxyCommand for use in this plugin. + ini: + - section: 'ssh_connection' + key: 'ssh_common_args' + version_added: '2.7' + env: + - name: ANSIBLE_SSH_COMMON_ARGS + version_added: '2.7' + vars: + - name: ansible_ssh_common_args + cli: + - name: ssh_common_args + default: '' + ssh_extra_args: + description: Only used in parsing ProxyCommand for use in this plugin. + vars: + - name: ansible_ssh_extra_args + env: + - name: ANSIBLE_SSH_EXTRA_ARGS + version_added: '2.7' + ini: + - key: ssh_extra_args + section: ssh_connection + version_added: '2.7' + cli: + - name: ssh_extra_args + default: '' + pty: + default: True + description: 'SUDO usually requires a PTY, True to give a PTY and False to not give a PTY.' 
+ env: + - name: ANSIBLE_PARAMIKO_PTY + ini: + - section: paramiko_connection + key: pty + type: boolean + record_host_keys: + default: True + description: 'Save the host keys to a file' + env: [{name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS}] + ini: + - section: paramiko_connection + key: record_host_keys + type: boolean + host_key_checking: + description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host' + type: boolean + default: True + env: + - name: ANSIBLE_HOST_KEY_CHECKING + - name: ANSIBLE_SSH_HOST_KEY_CHECKING + version_added: '2.5' + - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING + version_added: '2.5' + ini: + - section: defaults + key: host_key_checking + - section: paramiko_connection + key: host_key_checking + version_added: '2.5' + vars: + - name: ansible_host_key_checking + version_added: '2.5' + - name: ansible_ssh_host_key_checking + version_added: '2.5' + - name: ansible_paramiko_host_key_checking + version_added: '2.5' + use_persistent_connections: + description: 'Toggles the use of persistence for connections' + type: boolean + default: False + env: + - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS + ini: + - section: defaults + key: use_persistent_connections + banner_timeout: + type: float + default: 30 + version_added: '2.14' + description: + - Configures, in seconds, the amount of time to wait for the SSH + banner to be presented. This option is supported by paramiko + version 1.15.0 or newer. + ini: + - section: paramiko_connection + key: banner_timeout + env: + - name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT +# TODO: +#timeout=self._play_context.timeout, +""" + +import os +import socket +import tempfile +import traceback +import fcntl +import sys +import re + +from termios import tcflush, TCIFLUSH +from ansible.module_utils.compat.version import LooseVersion +from binascii import hexlify + +from ansible.errors import ( + AnsibleAuthenticationFailure, + AnsibleConnectionFailure, + AnsibleError, + AnsibleFileNotFound, +) +from ansible.module_utils.compat.paramiko import PARAMIKO_IMPORT_ERR, paramiko +from ansible.plugins.connection import ConnectionBase +from ansible.utils.display import Display +from ansible.utils.path import makedirs_safe +from ansible.module_utils._text import to_bytes, to_native, to_text + +display = Display() + + +AUTHENTICITY_MSG = """ +paramiko: The authenticity of host '%s' can't be established. +The %s key fingerprint is %s. +Are you sure you want to continue connecting (yes/no)? +""" + +# SSH Options Regex +SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)') + + +class MyAddPolicy(object): + """ + Based on AutoAddPolicy in paramiko so we can determine when keys are added + + and also prompt for input. + + Policy for automatically adding the hostname and new host key to the + local L{HostKeys} object, and saving it. This is used by L{SSHClient}. 
+ """ + + def __init__(self, new_stdin, connection): + self._new_stdin = new_stdin + self.connection = connection + self._options = connection._options + + def missing_host_key(self, client, hostname, key): + + if all((self._options['host_key_checking'], not self._options['host_key_auto_add'])): + + fingerprint = hexlify(key.get_fingerprint()) + ktype = key.get_name() + + if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence: + # don't print the prompt string since the user cannot respond + # to the question anyway + raise AnsibleError(AUTHENTICITY_MSG[1:92] % (hostname, ktype, fingerprint)) + + self.connection.connection_lock() + + old_stdin = sys.stdin + sys.stdin = self._new_stdin + + # clear out any premature input on sys.stdin + tcflush(sys.stdin, TCIFLUSH) + + inp = input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint)) + sys.stdin = old_stdin + + self.connection.connection_unlock() + + if inp not in ['yes', 'y', '']: + raise AnsibleError("host connection rejected by user") + + key._added_by_ansible_this_time = True + + # existing implementation below: + client._host_keys.add(hostname, key.get_name(), key) + + # host keys are actually saved in close() function below + # in order to control ordering. + + +# keep connection objects on a per host basis to avoid repeated attempts to reconnect + +SSH_CONNECTION_CACHE = {} # type: dict[str, paramiko.client.SSHClient] +SFTP_CONNECTION_CACHE = {} # type: dict[str, paramiko.sftp_client.SFTPClient] + + +class Connection(ConnectionBase): + ''' SSH based connections with Paramiko ''' + + transport = 'paramiko' + _log_channel = None + + def _cache_key(self): + return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user) + + def _connect(self): + cache_key = self._cache_key() + if cache_key in SSH_CONNECTION_CACHE: + self.ssh = SSH_CONNECTION_CACHE[cache_key] + else: + self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached() + + self._connected = True + return self + + def _set_log_channel(self, name): + '''Mimic paramiko.SSHClient.set_log_channel''' + self._log_channel = name + + def _parse_proxy_command(self, port=22): + proxy_command = None + # Parse ansible_ssh_common_args, specifically looking for ProxyCommand + ssh_args = [ + self.get_option('ssh_extra_args'), + self.get_option('ssh_common_args'), + self.get_option('ssh_args', ''), + ] + + args = self._split_ssh_args(' '.join(ssh_args)) + for i, arg in enumerate(args): + if arg.lower() == 'proxycommand': + # _split_ssh_args split ProxyCommand from the command itself + proxy_command = args[i + 1] + else: + # ProxyCommand and the command itself are a single string + match = SETTINGS_REGEX.match(arg) + if match: + if match.group(1).lower() == 'proxycommand': + proxy_command = match.group(2) + + if proxy_command: + break + + proxy_command = self.get_option('proxy_command') or proxy_command + + sock_kwarg = {} + if proxy_command: + replacers = { + '%h': self._play_context.remote_addr, + '%p': port, + '%r': self._play_context.remote_user + } + for find, replace in replacers.items(): + proxy_command = proxy_command.replace(find, str(replace)) + try: + sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)} + display.vvv("CONFIGURE PROXY COMMAND FOR CONNECTION: %s" % proxy_command, host=self._play_context.remote_addr) + except AttributeError: + display.warning('Paramiko ProxyCommand support unavailable. ' + 'Please upgrade to Paramiko 1.9.0 or newer. 
' + 'Not using configured ProxyCommand') + + return sock_kwarg + + def _connect_uncached(self): + ''' activates the connection object ''' + + if paramiko is None: + raise AnsibleError("paramiko is not installed: %s" % to_native(PARAMIKO_IMPORT_ERR)) + + port = self._play_context.port or 22 + display.vvv("ESTABLISH PARAMIKO SSH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr), + host=self._play_context.remote_addr) + + ssh = paramiko.SSHClient() + + # Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently + # is keeping or omitting rsa-sha2 algorithms + paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ()) + paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ()) + use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms') + disabled_algorithms = {} + if not use_rsa_sha2_algorithms: + if paramiko_preferred_pubkeys: + disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a) + if paramiko_preferred_hostkeys: + disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a) + + # override paramiko's default logger name + if self._log_channel is not None: + ssh.set_log_channel(self._log_channel) + + self.keyfile = os.path.expanduser("~/.ssh/known_hosts") + + if self.get_option('host_key_checking'): + for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"): + try: + # TODO: check if we need to look at several possible locations, possible for loop + ssh.load_system_host_keys(ssh_known_hosts) + break + except IOError: + pass # file was not found, but not required to function + ssh.load_system_host_keys() + + ssh_connect_kwargs = self._parse_proxy_command(port) + + ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self)) + + conn_password = self.get_option('password') or self._play_context.password + + allow_agent = True + + if conn_password is not None: + allow_agent = False + + try: + key_filename = None + if self._play_context.private_key_file: + key_filename = os.path.expanduser(self._play_context.private_key_file) + + # paramiko 2.2 introduced auth_timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'): + ssh_connect_kwargs['auth_timeout'] = self._play_context.timeout + + # paramiko 1.15 introduced banner timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'): + ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout') + + ssh.connect( + self._play_context.remote_addr.lower(), + username=self._play_context.remote_user, + allow_agent=allow_agent, + look_for_keys=self.get_option('look_for_keys'), + key_filename=key_filename, + password=conn_password, + timeout=self._play_context.timeout, + port=port, + disabled_algorithms=disabled_algorithms, + **ssh_connect_kwargs, + ) + except paramiko.ssh_exception.BadHostKeyException as e: + raise AnsibleConnectionFailure('host key mismatch for %s' % e.hostname) + except paramiko.ssh_exception.AuthenticationException as e: + msg = 'Failed to authenticate: {0}'.format(to_text(e)) + raise AnsibleAuthenticationFailure(msg) + except Exception as e: + msg = to_text(e) + if u"PID check failed" in msg: + raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible") + elif u"Private key file is encrypted" in msg: + msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' 
% ( + self._play_context.remote_user, self._play_context.remote_addr, port, msg) + raise AnsibleConnectionFailure(msg) + else: + raise AnsibleConnectionFailure(msg) + + return ssh + + def exec_command(self, cmd, in_data=None, sudoable=True): + ''' run a command on the remote host ''' + + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + if in_data: + raise AnsibleError("Internal Error: this module does not support optimized module pipelining") + + bufsize = 4096 + + try: + self.ssh.get_transport().set_keepalive(5) + chan = self.ssh.get_transport().open_session() + except Exception as e: + text_e = to_text(e) + msg = u"Failed to open session" + if text_e: + msg += u": %s" % text_e + raise AnsibleConnectionFailure(to_native(msg)) + + # sudo usually requires a PTY (cf. requiretty option), therefore + # we give it one by default (pty=True in ansible.cfg), and we try + # to initialise from the calling environment when sudoable is enabled + if self.get_option('pty') and sudoable: + chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0))) + + display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr) + + cmd = to_bytes(cmd, errors='surrogate_or_strict') + + no_prompt_out = b'' + no_prompt_err = b'' + become_output = b'' + + try: + chan.exec_command(cmd) + if self.become and self.become.expect_prompt(): + passprompt = False + become_sucess = False + while not (become_sucess or passprompt): + display.debug('Waiting for Privilege Escalation input') + + chunk = chan.recv(bufsize) + display.debug("chunk is: %s" % chunk) + if not chunk: + if b'unknown user' in become_output: + n_become_user = to_native(self.become.get_option('become_user', + playcontext=self._play_context)) + raise AnsibleError('user %s does not exist' % n_become_user) + else: + break + # raise AnsibleError('ssh connection closed waiting for password prompt') + become_output += chunk + + # need to check every line because we might get lectured + # and we might get the middle of a line in a chunk + for l in become_output.splitlines(True): + if self.become.check_success(l): + become_sucess = True + break + elif self.become.check_password_prompt(l): + passprompt = True + break + + if passprompt: + if self.become: + become_pass = self.become.get_option('become_pass', playcontext=self._play_context) + chan.sendall(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n') + else: + raise AnsibleError("A password is required but none was supplied") + else: + no_prompt_out += become_output + no_prompt_err += become_output + except socket.timeout: + raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output) + + stdout = b''.join(chan.makefile('rb', bufsize)) + stderr = b''.join(chan.makefile_stderr('rb', bufsize)) + + return (chan.recv_exit_status(), no_prompt_out + stdout, no_prompt_out + stderr) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to remote ''' + + super(Connection, self).put_file(in_path, out_path) + + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound("file or module does not exist: %s" % in_path) + + try: + self.sftp = self.ssh.open_sftp() + except Exception as e: + raise AnsibleError("failed to open a SFTP connection (%s)" % e) + + try: + self.sftp.put(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, 
errors='surrogate_or_strict')) + except IOError: + raise AnsibleError("failed to transfer file to %s" % out_path) + + def _connect_sftp(self): + + cache_key = "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user) + if cache_key in SFTP_CONNECTION_CACHE: + return SFTP_CONNECTION_CACHE[cache_key] + else: + result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp() + return result + + def fetch_file(self, in_path, out_path): + ''' save a remote file to the specified path ''' + + super(Connection, self).fetch_file(in_path, out_path) + + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + try: + self.sftp = self._connect_sftp() + except Exception as e: + raise AnsibleError("failed to open a SFTP connection (%s)" % to_native(e)) + + try: + self.sftp.get(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict')) + except IOError: + raise AnsibleError("failed to transfer file from %s" % in_path) + + def _any_keys_added(self): + + for hostname, keys in self.ssh._host_keys.items(): + for keytype, key in keys.items(): + added_this_time = getattr(key, '_added_by_ansible_this_time', False) + if added_this_time: + return True + return False + + def _save_ssh_host_keys(self, filename): + ''' + not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks + don't complain about it :) + ''' + + if not self._any_keys_added(): + return False + + path = os.path.expanduser("~/.ssh") + makedirs_safe(path) + + with open(filename, 'w') as f: + + for hostname, keys in self.ssh._host_keys.items(): + + for keytype, key in keys.items(): + + # was f.write + added_this_time = getattr(key, '_added_by_ansible_this_time', False) + if not added_this_time: + f.write("%s %s %s\n" % (hostname, keytype, key.get_base64())) + + for hostname, keys in self.ssh._host_keys.items(): + + for keytype, key in keys.items(): + added_this_time = getattr(key, '_added_by_ansible_this_time', False) + if added_this_time: + f.write("%s %s %s\n" % (hostname, keytype, key.get_base64())) + + def reset(self): + if not self._connected: + return + self.close() + self._connect() + + def close(self): + ''' terminate the connection ''' + + cache_key = self._cache_key() + SSH_CONNECTION_CACHE.pop(cache_key, None) + SFTP_CONNECTION_CACHE.pop(cache_key, None) + + if hasattr(self, 'sftp'): + if self.sftp is not None: + self.sftp.close() + + if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added(): + + # add any new SSH host keys -- warning -- this could be slow + # (This doesn't acquire the connection lock because it needs + # to exclude only other known_hosts writers, not connections + # that are starting up.) 
+ lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock") + dirname = os.path.dirname(self.keyfile) + makedirs_safe(dirname) + + KEY_LOCK = open(lockfile, 'w') + fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX) + + try: + # just in case any were added recently + + self.ssh.load_system_host_keys() + self.ssh._host_keys.update(self.ssh._system_host_keys) + + # gather information about the current key file, so + # we can ensure the new file has the correct mode/owner + + key_dir = os.path.dirname(self.keyfile) + if os.path.exists(self.keyfile): + key_stat = os.stat(self.keyfile) + mode = key_stat.st_mode + uid = key_stat.st_uid + gid = key_stat.st_gid + else: + mode = 33188 + uid = os.getuid() + gid = os.getgid() + + # Save the new keys to a temporary file and move it into place + # rather than rewriting the file. We set delete=False because + # the file will be moved into place rather than cleaned up. + + tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False) + os.chmod(tmp_keyfile.name, mode & 0o7777) + os.chown(tmp_keyfile.name, uid, gid) + + self._save_ssh_host_keys(tmp_keyfile.name) + tmp_keyfile.close() + + os.rename(tmp_keyfile.name, self.keyfile) + + except Exception: + + # unable to save keys, including scenario when key was invalid + # and caught earlier + traceback.print_exc() + fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN) + + self.ssh.close() + self._connected = False diff --git a/lib/ansible/plugins/connection/psrp.py b/lib/ansible/plugins/connection/psrp.py new file mode 100644 index 0000000..dfcf0e5 --- /dev/null +++ b/lib/ansible/plugins/connection/psrp.py @@ -0,0 +1,898 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +author: Ansible Core Team +name: psrp +short_description: Run tasks over Microsoft PowerShell Remoting Protocol +description: +- Run commands or put/fetch on a target via PSRP (WinRM plugin) +- This is similar to the I(winrm) connection plugin which uses the same + underlying transport but instead runs in a PowerShell interpreter. +version_added: "2.7" +requirements: +- pypsrp>=0.4.0 (Python library) +extends_documentation_fragment: + - connection_pipelining +options: + # transport options + remote_addr: + description: + - The hostname or IP address of the remote host. + default: inventory_hostname + type: str + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_psrp_host + remote_user: + description: + - The user to log in as. + type: str + vars: + - name: ansible_user + - name: ansible_psrp_user + keyword: + - name: remote_user + remote_password: + description: Authentication password for the C(remote_user). Can be supplied as CLI option. + type: str + vars: + - name: ansible_password + - name: ansible_winrm_pass + - name: ansible_winrm_password + aliases: + - password # Needed for --ask-pass to come through on delegation + port: + description: + - The port for PSRP to connect on the remote target. + - Default is C(5986) if I(protocol) is not defined or is C(https), + otherwise the port is C(5985). + type: int + vars: + - name: ansible_port + - name: ansible_psrp_port + keyword: + - name: port + protocol: + description: + - Set the protocol to use for the connection. + - Default is C(https) if I(port) is not defined or I(port) is not C(5985). 
+ choices: + - http + - https + type: str + vars: + - name: ansible_psrp_protocol + path: + description: + - The URI path to connect to. + type: str + vars: + - name: ansible_psrp_path + default: 'wsman' + auth: + description: + - The authentication protocol to use when authenticating the remote user. + - The default, C(negotiate), will attempt to use C(Kerberos) if it is + available and fall back to C(NTLM) if it isn't. + type: str + vars: + - name: ansible_psrp_auth + choices: + - basic + - certificate + - negotiate + - kerberos + - ntlm + - credssp + default: negotiate + cert_validation: + description: + - Whether to validate the remote server's certificate or not. + - Set to C(ignore) to not validate any certificates. + - I(ca_cert) can be set to the path of a PEM certificate chain to + use in the validation. + choices: + - validate + - ignore + default: validate + type: str + vars: + - name: ansible_psrp_cert_validation + ca_cert: + description: + - The path to a PEM certificate chain to use when validating the server's + certificate. + - This value is ignored if I(cert_validation) is set to C(ignore). + type: path + vars: + - name: ansible_psrp_cert_trust_path + - name: ansible_psrp_ca_cert + aliases: [ cert_trust_path ] + connection_timeout: + description: + - The connection timeout for making the request to the remote host. + - This is measured in seconds. + type: int + vars: + - name: ansible_psrp_connection_timeout + default: 30 + read_timeout: + description: + - The read timeout for receiving data from the remote host. + - This value must always be greater than I(operation_timeout). + - This option requires pypsrp >= 0.3. + - This is measured in seconds. + type: int + vars: + - name: ansible_psrp_read_timeout + default: 30 + version_added: '2.8' + reconnection_retries: + description: + - The number of retries on connection errors. + type: int + vars: + - name: ansible_psrp_reconnection_retries + default: 0 + version_added: '2.8' + reconnection_backoff: + description: + - The backoff time to use in between reconnection attempts. + (First sleeps X, then sleeps 2*X, then sleeps 4*X, ...) + - This is measured in seconds. + - The C(ansible_psrp_reconnection_backoff) variable was added in Ansible + 2.9. + type: int + vars: + - name: ansible_psrp_connection_backoff + - name: ansible_psrp_reconnection_backoff + default: 2 + version_added: '2.8' + message_encryption: + description: + - Controls the message encryption settings, this is different from TLS + encryption when I(ansible_psrp_protocol) is C(https). + - Only the auth protocols C(negotiate), C(kerberos), C(ntlm), and + C(credssp) can do message encryption. The other authentication protocols + only support encryption when C(protocol) is set to C(https). + - C(auto) means means message encryption is only used when not using + TLS/HTTPS. + - C(always) is the same as C(auto) but message encryption is always used + even when running over TLS/HTTPS. + - C(never) disables any encryption checks that are in place when running + over HTTP and disables any authentication encryption processes. + type: str + vars: + - name: ansible_psrp_message_encryption + choices: + - auto + - always + - never + default: auto + proxy: + description: + - Set the proxy URL to use when connecting to the remote host. + vars: + - name: ansible_psrp_proxy + type: str + ignore_proxy: + description: + - Will disable any environment proxy settings and connect directly to the + remote host. + - This option is ignored if C(proxy) is set. 
+ vars: + - name: ansible_psrp_ignore_proxy + type: bool + default: 'no' + + # auth options + certificate_key_pem: + description: + - The local path to an X509 certificate key to use with certificate auth. + type: path + vars: + - name: ansible_psrp_certificate_key_pem + certificate_pem: + description: + - The local path to an X509 certificate to use with certificate auth. + type: path + vars: + - name: ansible_psrp_certificate_pem + credssp_auth_mechanism: + description: + - The sub authentication mechanism to use with CredSSP auth. + - When C(auto), both Kerberos and NTLM is attempted with kerberos being + preferred. + type: str + choices: + - auto + - kerberos + - ntlm + default: auto + vars: + - name: ansible_psrp_credssp_auth_mechanism + credssp_disable_tlsv1_2: + description: + - Disables the use of TLSv1.2 on the CredSSP authentication channel. + - This should not be set to C(yes) unless dealing with a host that does not + have TLSv1.2. + default: no + type: bool + vars: + - name: ansible_psrp_credssp_disable_tlsv1_2 + credssp_minimum_version: + description: + - The minimum CredSSP server authentication version that will be accepted. + - Set to C(5) to ensure the server has been patched and is not vulnerable + to CVE 2018-0886. + default: 2 + type: int + vars: + - name: ansible_psrp_credssp_minimum_version + negotiate_delegate: + description: + - Allow the remote user the ability to delegate it's credentials to another + server, i.e. credential delegation. + - Only valid when Kerberos was the negotiated auth or was explicitly set as + the authentication. + - Ignored when NTLM was the negotiated auth. + type: bool + vars: + - name: ansible_psrp_negotiate_delegate + negotiate_hostname_override: + description: + - Override the remote hostname when searching for the host in the Kerberos + lookup. + - This allows Ansible to connect over IP but authenticate with the remote + server using it's DNS name. + - Only valid when Kerberos was the negotiated auth or was explicitly set as + the authentication. + - Ignored when NTLM was the negotiated auth. + type: str + vars: + - name: ansible_psrp_negotiate_hostname_override + negotiate_send_cbt: + description: + - Send the Channel Binding Token (CBT) structure when authenticating. + - CBT is used to provide extra protection against Man in the Middle C(MitM) + attacks by binding the outer transport channel to the auth channel. + - CBT is not used when using just C(HTTP), only C(HTTPS). + default: yes + type: bool + vars: + - name: ansible_psrp_negotiate_send_cbt + negotiate_service: + description: + - Override the service part of the SPN used during Kerberos authentication. + - Only valid when Kerberos was the negotiated auth or was explicitly set as + the authentication. + - Ignored when NTLM was the negotiated auth. + default: WSMAN + type: str + vars: + - name: ansible_psrp_negotiate_service + + # protocol options + operation_timeout: + description: + - Sets the WSMan timeout for each operation. + - This is measured in seconds. + - This should not exceed the value for C(connection_timeout). + type: int + vars: + - name: ansible_psrp_operation_timeout + default: 20 + max_envelope_size: + description: + - Sets the maximum size of each WSMan message sent to the remote host. + - This is measured in bytes. + - Defaults to C(150KiB) for compatibility with older hosts. + type: int + vars: + - name: ansible_psrp_max_envelope_size + default: 153600 + configuration_name: + description: + - The name of the PowerShell configuration endpoint to connect to. 
+ type: str + vars: + - name: ansible_psrp_configuration_name + default: Microsoft.PowerShell +""" + +import base64 +import json +import logging +import os + +from ansible import constants as C +from ansible.errors import AnsibleConnectionFailure, AnsibleError +from ansible.errors import AnsibleFileNotFound +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.plugins.connection import ConnectionBase +from ansible.plugins.shell.powershell import _common_args +from ansible.utils.display import Display +from ansible.utils.hashing import sha1 + +HAS_PYPSRP = True +PYPSRP_IMP_ERR = None +try: + import pypsrp + from pypsrp.complex_objects import GenericComplexObject, PSInvocationState, RunspacePoolState + from pypsrp.exceptions import AuthenticationError, WinRMError + from pypsrp.host import PSHost, PSHostUserInterface + from pypsrp.powershell import PowerShell, RunspacePool + from pypsrp.wsman import WSMan, AUTH_KWARGS + from requests.exceptions import ConnectionError, ConnectTimeout +except ImportError as err: + HAS_PYPSRP = False + PYPSRP_IMP_ERR = err + +display = Display() + + +class Connection(ConnectionBase): + + transport = 'psrp' + module_implementation_preferences = ('.ps1', '.exe', '') + allow_executable = False + has_pipelining = True + allow_extras = True + + def __init__(self, *args, **kwargs): + self.always_pipeline_modules = True + self.has_native_async = True + + self.runspace = None + self.host = None + self._last_pipeline = False + + self._shell_type = 'powershell' + super(Connection, self).__init__(*args, **kwargs) + + if not C.DEFAULT_DEBUG: + logging.getLogger('pypsrp').setLevel(logging.WARNING) + logging.getLogger('requests_credssp').setLevel(logging.INFO) + logging.getLogger('urllib3').setLevel(logging.INFO) + + def _connect(self): + if not HAS_PYPSRP: + raise AnsibleError("pypsrp or dependencies are not installed: %s" + % to_native(PYPSRP_IMP_ERR)) + super(Connection, self)._connect() + self._build_kwargs() + display.vvv("ESTABLISH PSRP CONNECTION FOR USER: %s ON PORT %s TO %s" % + (self._psrp_user, self._psrp_port, self._psrp_host), + host=self._psrp_host) + + if not self.runspace: + connection = WSMan(**self._psrp_conn_kwargs) + + # create our pseudo host to capture the exit code and host output + host_ui = PSHostUserInterface() + self.host = PSHost(None, None, False, "Ansible PSRP Host", None, + host_ui, None) + + self.runspace = RunspacePool( + connection, host=self.host, + configuration_name=self._psrp_configuration_name + ) + display.vvvvv( + "PSRP OPEN RUNSPACE: auth=%s configuration=%s endpoint=%s" % + (self._psrp_auth, self._psrp_configuration_name, + connection.transport.endpoint), host=self._psrp_host + ) + try: + self.runspace.open() + except AuthenticationError as e: + raise AnsibleConnectionFailure("failed to authenticate with " + "the server: %s" % to_native(e)) + except WinRMError as e: + raise AnsibleConnectionFailure( + "psrp connection failure during runspace open: %s" + % to_native(e) + ) + except (ConnectionError, ConnectTimeout) as e: + raise AnsibleConnectionFailure( + "Failed to connect to the host via PSRP: %s" + % to_native(e) + ) + + self._connected = True + self._last_pipeline = None + return self + + def reset(self): + if not self._connected: + self.runspace = None + return + + # Try out best to ensure the runspace is closed to free up server side resources + try: + self.close() + except Exception as e: + # There's a good chance the connection was 
already closed so just log the error and move on + display.debug("PSRP reset - failed to closed runspace: %s" % to_text(e)) + + display.vvvvv("PSRP: Reset Connection", host=self._psrp_host) + self.runspace = None + self._connect() + + def exec_command(self, cmd, in_data=None, sudoable=True): + super(Connection, self).exec_command(cmd, in_data=in_data, + sudoable=sudoable) + + if cmd.startswith(" ".join(_common_args) + " -EncodedCommand"): + # This is a PowerShell script encoded by the shell plugin, we will + # decode the script and execute it in the runspace instead of + # starting a new interpreter to save on time + b_command = base64.b64decode(cmd.split(" ")[-1]) + script = to_text(b_command, 'utf-16-le') + in_data = to_text(in_data, errors="surrogate_or_strict", nonstring="passthru") + + if in_data and in_data.startswith(u"#!"): + # ANSIBALLZ wrapper, we need to get the interpreter and execute + # that as the script - note this won't work as basic.py relies + # on packages not available on Windows, once fixed we can enable + # this path + interpreter = to_native(in_data.splitlines()[0][2:]) + # script = "$input | &'%s' -" % interpreter + # in_data = to_text(in_data) + raise AnsibleError("cannot run the interpreter '%s' on the psrp " + "connection plugin" % interpreter) + + # call build_module_command to get the bootstrap wrapper text + bootstrap_wrapper = self._shell.build_module_command('', '', '') + if bootstrap_wrapper == cmd: + # Do not display to the user each invocation of the bootstrap wrapper + display.vvv("PSRP: EXEC (via pipeline wrapper)") + else: + display.vvv("PSRP: EXEC %s" % script, host=self._psrp_host) + else: + # In other cases we want to execute the cmd as the script. We add on the 'exit $LASTEXITCODE' to ensure the + # rc is propagated back to the connection plugin. + script = to_text(u"%s\nexit $LASTEXITCODE" % cmd) + display.vvv(u"PSRP: EXEC %s" % script, host=self._psrp_host) + + rc, stdout, stderr = self._exec_psrp_script(script, in_data) + return rc, stdout, stderr + + def put_file(self, in_path, out_path): + super(Connection, self).put_file(in_path, out_path) + + out_path = self._shell._unquote(out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._psrp_host) + + copy_script = '''begin { + $ErrorActionPreference = "Stop" + $WarningPreference = "Continue" + $path = $MyInvocation.UnboundArguments[0] + $fd = [System.IO.File]::Create($path) + $algo = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create() + $bytes = @() + + $bindingFlags = [System.Reflection.BindingFlags]'NonPublic, Instance' + Function Get-Property { + <# + .SYNOPSIS + Gets the private/internal property specified of the object passed in. + #> + Param ( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)] + [System.Object] + $Object, + + [Parameter(Mandatory=$true, Position=1)] + [System.String] + $Name + ) + + $Object.GetType().GetProperty($Name, $bindingFlags).GetValue($Object, $null) + } + + Function Set-Property { + <# + .SYNOPSIS + Sets the private/internal property specified on the object passed in. + #> + Param ( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)] + [System.Object] + $Object, + + [Parameter(Mandatory=$true, Position=1)] + [System.String] + $Name, + + [Parameter(Mandatory=$true, Position=2)] + [AllowNull()] + [System.Object] + $Value + ) + + $Object.GetType().GetProperty($Name, $bindingFlags).SetValue($Object, $Value, $null) + } + + Function Get-Field { + <# + .SYNOPSIS + Gets the private/internal field specified of the object passed in. 
+ #> + Param ( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)] + [System.Object] + $Object, + + [Parameter(Mandatory=$true, Position=1)] + [System.String] + $Name + ) + + $Object.GetType().GetField($Name, $bindingFlags).GetValue($Object) + } + + # MaximumAllowedMemory is required to be set to so we can send input data that exceeds the limit on a PS + # Runspace. We use reflection to access/set this property as it is not accessible publicly. This is not ideal + # but works on all PowerShell versions I've tested with. We originally used WinRS to send the raw bytes to the + # host but this falls flat if someone is using a custom PS configuration name so this is a workaround. This + # isn't required for smaller files so if it fails we ignore the error and hope it wasn't needed. + # https://github.com/PowerShell/PowerShell/blob/c8e72d1e664b1ee04a14f226adf655cced24e5f0/src/System.Management.Automation/engine/serialization.cs#L325 + try { + $Host | Get-Property 'ExternalHost' | ` + Get-Field '_transportManager' | ` + Get-Property 'Fragmentor' | ` + Get-Property 'DeserializationContext' | ` + Set-Property 'MaximumAllowedMemory' $null + } catch {} +} +process { + $bytes = [System.Convert]::FromBase64String($input) + $algo.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) > $null + $fd.Write($bytes, 0, $bytes.Length) +} +end { + $fd.Close() + + $algo.TransformFinalBlock($bytes, 0, 0) > $null + $hash = [System.BitConverter]::ToString($algo.Hash).Replace('-', '').ToLowerInvariant() + Write-Output -InputObject "{`"sha1`":`"$hash`"}" +} +''' + + # Get the buffer size of each fragment to send, subtract 82 for the fragment, message, and other header info + # fields that PSRP adds. Adjust to size of the base64 encoded bytes length. + buffer_size = int((self.runspace.connection.max_payload_size - 82) / 4 * 3) + + sha1_hash = sha1() + + b_in_path = to_bytes(in_path, errors='surrogate_or_strict') + if not os.path.exists(b_in_path): + raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path)) + + def read_gen(): + offset = 0 + + with open(b_in_path, 'rb') as src_fd: + for b_data in iter((lambda: src_fd.read(buffer_size)), b""): + data_len = len(b_data) + offset += data_len + sha1_hash.update(b_data) + + # PSRP technically supports sending raw bytes but that method requires a larger CLIXML message. + # Sending base64 is still more efficient here. 
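The buffer_size computed above keeps each base64-encoded fragment inside the WSMan payload limit: base64 turns every 3 input bytes into 4 output characters, and 82 bytes are reserved for the PSRP fragment and message headers. A minimal sketch of the same arithmetic follows; it uses an example payload size and, unlike the plugin's one-liner, also rounds the buffer down to a multiple of 3 so the encoding needs no padding:

    import base64

    max_payload_size = 160_000            # example WSMan payload limit
    overhead = 82                         # PSRP fragment/message headers

    # 3 raw bytes become 4 base64 characters, so scale the remaining room by 3/4
    # and round down to a multiple of 3 to avoid base64 padding.
    buffer_size = (max_payload_size - overhead) * 3 // 4
    buffer_size -= buffer_size % 3

    chunk = b"x" * buffer_size
    encoded = base64.b64encode(chunk)
    assert len(encoded) + overhead <= max_payload_size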
+ display.vvvvv("PSRP PUT %s to %s (offset=%d, size=%d" % (in_path, out_path, offset, data_len), + host=self._psrp_host) + b64_data = base64.b64encode(b_data) + yield [to_text(b64_data)] + + if offset == 0: # empty file + yield [""] + + rc, stdout, stderr = self._exec_psrp_script(copy_script, read_gen(), arguments=[out_path]) + + if rc != 0: + raise AnsibleError(to_native(stderr)) + + put_output = json.loads(to_text(stdout)) + local_sha1 = sha1_hash.hexdigest() + remote_sha1 = put_output.get("sha1") + + if not remote_sha1: + raise AnsibleError("Remote sha1 was not returned, stdout: '%s', stderr: '%s'" + % (to_native(stdout), to_native(stderr))) + + if not remote_sha1 == local_sha1: + raise AnsibleError("Remote sha1 hash %s does not match local hash %s" + % (to_native(remote_sha1), to_native(local_sha1))) + + def fetch_file(self, in_path, out_path): + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), + host=self._psrp_host) + + in_path = self._shell._unquote(in_path) + out_path = out_path.replace('\\', '/') + + # because we are dealing with base64 data we need to get the max size + # of the bytes that the base64 size would equal + max_b64_size = int(self.runspace.connection.max_payload_size - + (self.runspace.connection.max_payload_size / 4 * 3)) + buffer_size = max_b64_size - (max_b64_size % 1024) + + # setup the file stream with read only mode + setup_script = '''$ErrorActionPreference = "Stop" +$path = '%s' + +if (Test-Path -Path $path -PathType Leaf) { + $fs = New-Object -TypeName System.IO.FileStream -ArgumentList @( + $path, + [System.IO.FileMode]::Open, + [System.IO.FileAccess]::Read, + [System.IO.FileShare]::Read + ) + $buffer_size = %d +} elseif (Test-Path -Path $path -PathType Container) { + Write-Output -InputObject "[DIR]" +} else { + Write-Error -Message "$path does not exist" + $host.SetShouldExit(1) +}''' % (self._shell._escape(in_path), buffer_size) + + # read the file stream at the offset and return the b64 string + read_script = '''$ErrorActionPreference = "Stop" +$fs.Seek(%d, [System.IO.SeekOrigin]::Begin) > $null +$buffer = New-Object -TypeName byte[] -ArgumentList $buffer_size +$bytes_read = $fs.Read($buffer, 0, $buffer_size) + +if ($bytes_read -gt 0) { + $bytes = $buffer[0..($bytes_read - 1)] + Write-Output -InputObject ([System.Convert]::ToBase64String($bytes)) +}''' + + # need to run the setup script outside of the local scope so the + # file stream stays active between fetch operations + rc, stdout, stderr = self._exec_psrp_script(setup_script, + use_local_scope=False) + if rc != 0: + raise AnsibleError("failed to setup file stream for fetch '%s': %s" + % (out_path, to_native(stderr))) + elif stdout.strip() == '[DIR]': + # to be consistent with other connection plugins, we assume the caller has created the target dir + return + + b_out_path = to_bytes(out_path, errors='surrogate_or_strict') + # to be consistent with other connection plugins, we assume the caller has created the target dir + offset = 0 + with open(b_out_path, 'wb') as out_file: + while True: + display.vvvvv("PSRP FETCH %s to %s (offset=%d" % + (in_path, out_path, offset), host=self._psrp_host) + rc, stdout, stderr = self._exec_psrp_script(read_script % offset) + if rc != 0: + raise AnsibleError("failed to transfer file to '%s': %s" + % (out_path, to_native(stderr))) + + data = base64.b64decode(stdout.strip()) + out_file.write(data) + if len(data) < buffer_size: + break + offset += len(data) + + rc, stdout, stderr = 
self._exec_psrp_script("$fs.Close()") + if rc != 0: + display.warning("failed to close remote file stream of file " + "'%s': %s" % (in_path, to_native(stderr))) + + def close(self): + if self.runspace and self.runspace.state == RunspacePoolState.OPENED: + display.vvvvv("PSRP CLOSE RUNSPACE: %s" % (self.runspace.id), + host=self._psrp_host) + self.runspace.close() + self.runspace = None + self._connected = False + self._last_pipeline = None + + def _build_kwargs(self): + self._psrp_host = self.get_option('remote_addr') + self._psrp_user = self.get_option('remote_user') + self._psrp_pass = self.get_option('remote_password') + + protocol = self.get_option('protocol') + port = self.get_option('port') + if protocol is None and port is None: + protocol = 'https' + port = 5986 + elif protocol is None: + protocol = 'https' if int(port) != 5985 else 'http' + elif port is None: + port = 5986 if protocol == 'https' else 5985 + + self._psrp_protocol = protocol + self._psrp_port = int(port) + + self._psrp_path = self.get_option('path') + self._psrp_auth = self.get_option('auth') + # cert validation can either be a bool or a path to the cert + cert_validation = self.get_option('cert_validation') + cert_trust_path = self.get_option('ca_cert') + if cert_validation == 'ignore': + self._psrp_cert_validation = False + elif cert_trust_path is not None: + self._psrp_cert_validation = cert_trust_path + else: + self._psrp_cert_validation = True + + self._psrp_connection_timeout = self.get_option('connection_timeout') # Can be None + self._psrp_read_timeout = self.get_option('read_timeout') # Can be None + self._psrp_message_encryption = self.get_option('message_encryption') + self._psrp_proxy = self.get_option('proxy') + self._psrp_ignore_proxy = boolean(self.get_option('ignore_proxy')) + self._psrp_operation_timeout = int(self.get_option('operation_timeout')) + self._psrp_max_envelope_size = int(self.get_option('max_envelope_size')) + self._psrp_configuration_name = self.get_option('configuration_name') + self._psrp_reconnection_retries = int(self.get_option('reconnection_retries')) + self._psrp_reconnection_backoff = float(self.get_option('reconnection_backoff')) + + self._psrp_certificate_key_pem = self.get_option('certificate_key_pem') + self._psrp_certificate_pem = self.get_option('certificate_pem') + self._psrp_credssp_auth_mechanism = self.get_option('credssp_auth_mechanism') + self._psrp_credssp_disable_tlsv1_2 = self.get_option('credssp_disable_tlsv1_2') + self._psrp_credssp_minimum_version = self.get_option('credssp_minimum_version') + self._psrp_negotiate_send_cbt = self.get_option('negotiate_send_cbt') + self._psrp_negotiate_delegate = self.get_option('negotiate_delegate') + self._psrp_negotiate_hostname_override = self.get_option('negotiate_hostname_override') + self._psrp_negotiate_service = self.get_option('negotiate_service') + + supported_args = [] + for auth_kwarg in AUTH_KWARGS.values(): + supported_args.extend(auth_kwarg) + extra_args = {v.replace('ansible_psrp_', '') for v in self.get_option('_extras')} + unsupported_args = extra_args.difference(supported_args) + + for arg in unsupported_args: + display.warning("ansible_psrp_%s is unsupported by the current " + "psrp version installed" % arg) + + self._psrp_conn_kwargs = dict( + server=self._psrp_host, port=self._psrp_port, + username=self._psrp_user, password=self._psrp_pass, + ssl=self._psrp_protocol == 'https', path=self._psrp_path, + auth=self._psrp_auth, cert_validation=self._psrp_cert_validation, + 
connection_timeout=self._psrp_connection_timeout, + encryption=self._psrp_message_encryption, proxy=self._psrp_proxy, + no_proxy=self._psrp_ignore_proxy, + max_envelope_size=self._psrp_max_envelope_size, + operation_timeout=self._psrp_operation_timeout, + certificate_key_pem=self._psrp_certificate_key_pem, + certificate_pem=self._psrp_certificate_pem, + credssp_auth_mechanism=self._psrp_credssp_auth_mechanism, + credssp_disable_tlsv1_2=self._psrp_credssp_disable_tlsv1_2, + credssp_minimum_version=self._psrp_credssp_minimum_version, + negotiate_send_cbt=self._psrp_negotiate_send_cbt, + negotiate_delegate=self._psrp_negotiate_delegate, + negotiate_hostname_override=self._psrp_negotiate_hostname_override, + negotiate_service=self._psrp_negotiate_service, + ) + + # Check if PSRP version supports newer read_timeout argument (needs pypsrp 0.3.0+) + if hasattr(pypsrp, 'FEATURES') and 'wsman_read_timeout' in pypsrp.FEATURES: + self._psrp_conn_kwargs['read_timeout'] = self._psrp_read_timeout + elif self._psrp_read_timeout is not None: + display.warning("ansible_psrp_read_timeout is unsupported by the current psrp version installed, " + "using ansible_psrp_connection_timeout value for read_timeout instead.") + + # Check if PSRP version supports newer reconnection_retries argument (needs pypsrp 0.3.0+) + if hasattr(pypsrp, 'FEATURES') and 'wsman_reconnections' in pypsrp.FEATURES: + self._psrp_conn_kwargs['reconnection_retries'] = self._psrp_reconnection_retries + self._psrp_conn_kwargs['reconnection_backoff'] = self._psrp_reconnection_backoff + else: + if self._psrp_reconnection_retries is not None: + display.warning("ansible_psrp_reconnection_retries is unsupported by the current psrp version installed.") + if self._psrp_reconnection_backoff is not None: + display.warning("ansible_psrp_reconnection_backoff is unsupported by the current psrp version installed.") + + # add in the extra args that were set + for arg in extra_args.intersection(supported_args): + option = self.get_option('_extras')['ansible_psrp_%s' % arg] + self._psrp_conn_kwargs[arg] = option + + def _exec_psrp_script(self, script, input_data=None, use_local_scope=True, arguments=None): + # Check if there's a command on the current pipeline that still needs to be closed. + if self._last_pipeline: + # Current pypsrp versions raise an exception if the current state was not RUNNING. We manually set it so we + # can call stop without any issues. + self._last_pipeline.state = PSInvocationState.RUNNING + self._last_pipeline.stop() + self._last_pipeline = None + + ps = PowerShell(self.runspace) + ps.add_script(script, use_local_scope=use_local_scope) + if arguments: + for arg in arguments: + ps.add_argument(arg) + + ps.invoke(input=input_data) + + rc, stdout, stderr = self._parse_pipeline_result(ps) + + # We should really call .stop() on all pipelines that are run to decrement the concurrent command counter on + # PSSession but that involves another round trip and is done when the runspace is closed. We instead store the + # last pipeline which is closed if another command is run on the runspace. + self._last_pipeline = ps + + return rc, stdout, stderr + + def _parse_pipeline_result(self, pipeline): + """ + PSRP doesn't have the same concept as other protocols with its output. + We need some extra logic to convert the pipeline streams and host + output into the format that Ansible understands. 
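The protocol/port defaulting in _build_kwargs above follows a small decision table: with neither value set it assumes HTTPS on 5986, an explicit port of 5985 implies HTTP, and an explicit protocol picks its conventional port. A standalone sketch of the same rules (the helper name is hypothetical):

    def resolve_psrp_endpoint(protocol=None, port=None):
        # Mirrors the defaulting rules in _build_kwargs above (sketch only).
        if protocol is None and port is None:
            return 'https', 5986
        if protocol is None:
            return ('https' if int(port) != 5985 else 'http'), int(port)
        if port is None:
            return protocol, (5986 if protocol == 'https' else 5985)
        return protocol, int(port)

    assert resolve_psrp_endpoint() == ('https', 5986)
    assert resolve_psrp_endpoint(port=5985) == ('http', 5985)
    assert resolve_psrp_endpoint(protocol='http') == ('http', 5985)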
+ + :param pipeline: The finished PowerShell pipeline that invoked our + commands + :return: rc, stdout, stderr based on the pipeline output + """ + # we try and get the rc from our host implementation, this is set if + # exit or $host.SetShouldExit() is called in our pipeline, if not we + # set to 0 if the pipeline had not errors and 1 if it did + rc = self.host.rc or (1 if pipeline.had_errors else 0) + + # TODO: figure out a better way of merging this with the host output + stdout_list = [] + for output in pipeline.output: + # Not all pipeline outputs are a string or contain a __str__ value, + # we will create our own output based on the properties of the + # complex object if that is the case. + if isinstance(output, GenericComplexObject) and output.to_string is None: + obj_lines = output.property_sets + for key, value in output.adapted_properties.items(): + obj_lines.append(u"%s: %s" % (key, value)) + for key, value in output.extended_properties.items(): + obj_lines.append(u"%s: %s" % (key, value)) + output_msg = u"\n".join(obj_lines) + else: + output_msg = to_text(output, nonstring='simplerepr') + + stdout_list.append(output_msg) + + if len(self.host.ui.stdout) > 0: + stdout_list += self.host.ui.stdout + stdout = u"\r\n".join(stdout_list) + + stderr_list = [] + for error in pipeline.streams.error: + # the error record is not as fully fleshed out like we usually get + # in PS, we will manually create it here + command_name = "%s : " % error.command_name if error.command_name else '' + position = "%s\r\n" % error.invocation_position_message if error.invocation_position_message else '' + error_msg = "%s%s\r\n%s" \ + " + CategoryInfo : %s\r\n" \ + " + FullyQualifiedErrorId : %s" \ + % (command_name, str(error), position, + error.message, error.fq_error) + stacktrace = error.script_stacktrace + if display.verbosity >= 3 and stacktrace is not None: + error_msg += "\r\nStackTrace:\r\n%s" % stacktrace + stderr_list.append(error_msg) + + if len(self.host.ui.stderr) > 0: + stderr_list += self.host.ui.stderr + stderr = u"\r\n".join([to_text(o) for o in stderr_list]) + + display.vvvvv("PSRP RC: %d" % rc, host=self._psrp_host) + display.vvvvv("PSRP STDOUT: %s" % stdout, host=self._psrp_host) + display.vvvvv("PSRP STDERR: %s" % stderr, host=self._psrp_host) + + # reset the host back output back to defaults, needed if running + # multiple pipelines on the same RunspacePool + self.host.rc = 0 + self.host.ui.stdout = [] + self.host.ui.stderr = [] + + return rc, to_bytes(stdout, encoding='utf-8'), to_bytes(stderr, encoding='utf-8') diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py new file mode 100644 index 0000000..e4d9628 --- /dev/null +++ b/lib/ansible/plugins/connection/ssh.py @@ -0,0 +1,1399 @@ +# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> +# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com> +# Copyright 2017 Toshio Kuratomi <tkuratomi@ansible.com> +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: ssh + short_description: connect via SSH client binary + description: + - This connection plugin allows Ansible to communicate to the target machines through normal SSH command line. 
+ - Ansible does not expose a channel to allow communication between the user and the SSH process to accept + a password manually to decrypt an SSH key when using this connection plugin (which is the default). The + use of C(ssh-agent) is highly recommended. + author: ansible (@core) + extends_documentation_fragment: + - connection_pipelining + version_added: historical + notes: + - Many options default to C(None) here but that only means we do not override the SSH tool's defaults and/or configuration. + For example, if you specify the port in this plugin it will override any C(Port) entry in your C(.ssh/config). + - The ssh CLI tool uses return code 255 as a 'connection error', this can conflict with commands/tools that + also return 255 as an error code and will look like an 'unreachable' condition or 'connection error' to this plugin. + options: + host: + description: Hostname/IP to connect to. + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_ssh_host + - name: delegated_vars['ansible_host'] + - name: delegated_vars['ansible_ssh_host'] + host_key_checking: + description: Determines if SSH should check host keys. + default: True + type: boolean + ini: + - section: defaults + key: 'host_key_checking' + - section: ssh_connection + key: 'host_key_checking' + version_added: '2.5' + env: + - name: ANSIBLE_HOST_KEY_CHECKING + - name: ANSIBLE_SSH_HOST_KEY_CHECKING + version_added: '2.5' + vars: + - name: ansible_host_key_checking + version_added: '2.5' + - name: ansible_ssh_host_key_checking + version_added: '2.5' + password: + description: Authentication password for the C(remote_user). Can be supplied as CLI option. + vars: + - name: ansible_password + - name: ansible_ssh_pass + - name: ansible_ssh_password + sshpass_prompt: + description: + - Password prompt that sshpass should search for. Supported by sshpass 1.06 and up. + - Defaults to C(Enter PIN for) when pkcs11_provider is set. + default: '' + ini: + - section: 'ssh_connection' + key: 'sshpass_prompt' + env: + - name: ANSIBLE_SSHPASS_PROMPT + vars: + - name: ansible_sshpass_prompt + version_added: '2.10' + ssh_args: + description: Arguments to pass to all SSH CLI tools. + default: '-C -o ControlMaster=auto -o ControlPersist=60s' + ini: + - section: 'ssh_connection' + key: 'ssh_args' + env: + - name: ANSIBLE_SSH_ARGS + vars: + - name: ansible_ssh_args + version_added: '2.7' + ssh_common_args: + description: Common extra args for all SSH CLI tools. + ini: + - section: 'ssh_connection' + key: 'ssh_common_args' + version_added: '2.7' + env: + - name: ANSIBLE_SSH_COMMON_ARGS + version_added: '2.7' + vars: + - name: ansible_ssh_common_args + cli: + - name: ssh_common_args + default: '' + ssh_executable: + default: ssh + description: + - This defines the location of the SSH binary. It defaults to C(ssh) which will use the first SSH binary available in $PATH. + - This option is usually not required, it might be useful when access to system SSH is restricted, + or when using SSH wrappers to connect to remote hosts. + env: [{name: ANSIBLE_SSH_EXECUTABLE}] + ini: + - {key: ssh_executable, section: ssh_connection} + #const: ANSIBLE_SSH_EXECUTABLE + version_added: "2.2" + vars: + - name: ansible_ssh_executable + version_added: '2.7' + sftp_executable: + default: sftp + description: + - This defines the location of the sftp binary. It defaults to C(sftp) which will use the first binary available in $PATH. 
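These *_args options are layered onto a single command line by _build_command further down: ssh_args comes first, then the plugin's own -o options (port, identity file, user, timeouts), then ssh_common_args and the per-tool *_extra_args. A rough sketch of that layering with placeholder values, using shlex.split in place of the plugin's own argument splitting:

    import shlex

    ssh_args = '-C -o ControlMaster=auto -o ControlPersist=60s'   # default
    ssh_common_args = '-o ProxyJump=bastion'                      # placeholder
    ssh_extra_args = '-o ServerAliveInterval=30'                  # placeholder

    command = (
        ['ssh']
        + shlex.split(ssh_args)
        + ['-o', 'Port=2222', '-o', 'User=deploy']   # options the plugin adds itself
        + shlex.split(ssh_common_args)
        + shlex.split(ssh_extra_args)
        + ['target.example.com', 'uptime']
    )
    print(' '.join(command))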
+ env: [{name: ANSIBLE_SFTP_EXECUTABLE}] + ini: + - {key: sftp_executable, section: ssh_connection} + version_added: "2.6" + vars: + - name: ansible_sftp_executable + version_added: '2.7' + scp_executable: + default: scp + description: + - This defines the location of the scp binary. It defaults to C(scp) which will use the first binary available in $PATH. + env: [{name: ANSIBLE_SCP_EXECUTABLE}] + ini: + - {key: scp_executable, section: ssh_connection} + version_added: "2.6" + vars: + - name: ansible_scp_executable + version_added: '2.7' + scp_extra_args: + description: Extra exclusive to the C(scp) CLI + vars: + - name: ansible_scp_extra_args + env: + - name: ANSIBLE_SCP_EXTRA_ARGS + version_added: '2.7' + ini: + - key: scp_extra_args + section: ssh_connection + version_added: '2.7' + cli: + - name: scp_extra_args + default: '' + sftp_extra_args: + description: Extra exclusive to the C(sftp) CLI + vars: + - name: ansible_sftp_extra_args + env: + - name: ANSIBLE_SFTP_EXTRA_ARGS + version_added: '2.7' + ini: + - key: sftp_extra_args + section: ssh_connection + version_added: '2.7' + cli: + - name: sftp_extra_args + default: '' + ssh_extra_args: + description: Extra exclusive to the SSH CLI. + vars: + - name: ansible_ssh_extra_args + env: + - name: ANSIBLE_SSH_EXTRA_ARGS + version_added: '2.7' + ini: + - key: ssh_extra_args + section: ssh_connection + version_added: '2.7' + cli: + - name: ssh_extra_args + default: '' + reconnection_retries: + description: + - Number of attempts to connect. + - Ansible retries connections only if it gets an SSH error with a return code of 255. + - Any errors with return codes other than 255 indicate an issue with program execution. + default: 0 + type: integer + env: + - name: ANSIBLE_SSH_RETRIES + ini: + - section: connection + key: retries + - section: ssh_connection + key: retries + vars: + - name: ansible_ssh_retries + version_added: '2.7' + port: + description: Remote port to connect to. + type: int + ini: + - section: defaults + key: remote_port + env: + - name: ANSIBLE_REMOTE_PORT + vars: + - name: ansible_port + - name: ansible_ssh_port + keyword: + - name: port + remote_user: + description: + - User name with which to login to the remote server, normally set by the remote_user keyword. + - If no user is supplied, Ansible will let the SSH client binary choose the user as it normally. + ini: + - section: defaults + key: remote_user + env: + - name: ANSIBLE_REMOTE_USER + vars: + - name: ansible_user + - name: ansible_ssh_user + cli: + - name: user + keyword: + - name: remote_user + pipelining: + env: + - name: ANSIBLE_PIPELINING + - name: ANSIBLE_SSH_PIPELINING + ini: + - section: defaults + key: pipelining + - section: connection + key: pipelining + - section: ssh_connection + key: pipelining + vars: + - name: ansible_pipelining + - name: ansible_ssh_pipelining + + private_key_file: + description: + - Path to private key file to use for authentication. + ini: + - section: defaults + key: private_key_file + env: + - name: ANSIBLE_PRIVATE_KEY_FILE + vars: + - name: ansible_private_key_file + - name: ansible_ssh_private_key_file + cli: + - name: private_key_file + option: '--private-key' + + control_path: + description: + - This is the location to save SSH's ControlPath sockets, it uses SSH's variable substitution. + - Since 2.3, if null (default), ansible will generate a unique hash. Use ``%(directory)s`` to indicate where to use the control dir path setting. + - Before 2.3 it defaulted to ``control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r``. 
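When control_path is left at its null default, the plugin derives a short unique socket name from the connection attributes (see _create_control_path further down): host, port and user are joined, hashed with SHA-1, and the first ten hex digits are appended to the %(directory)s placeholder. A minimal sketch of that derivation with example values:

    import hashlib

    host, port, user = 'web01.example.com', 22, 'deploy'   # example values
    digest = hashlib.sha1(('%s-%s-%s' % (host, port, user)).encode()).hexdigest()
    control_path = '%(directory)s/' + digest[:10]

    # Ansible later substitutes %(directory)s with the control_path_dir setting.
    print(control_path % dict(directory='/home/deploy/.ansible/cp'))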
+ - Be aware that this setting is ignored if C(-o ControlPath) is set in ssh args. + env: + - name: ANSIBLE_SSH_CONTROL_PATH + ini: + - key: control_path + section: ssh_connection + vars: + - name: ansible_control_path + version_added: '2.7' + control_path_dir: + default: ~/.ansible/cp + description: + - This sets the directory to use for ssh control path if the control path setting is null. + - Also, provides the ``%(directory)s`` variable for the control path setting. + env: + - name: ANSIBLE_SSH_CONTROL_PATH_DIR + ini: + - section: ssh_connection + key: control_path_dir + vars: + - name: ansible_control_path_dir + version_added: '2.7' + sftp_batch_mode: + default: 'yes' + description: 'TODO: write it' + env: [{name: ANSIBLE_SFTP_BATCH_MODE}] + ini: + - {key: sftp_batch_mode, section: ssh_connection} + type: bool + vars: + - name: ansible_sftp_batch_mode + version_added: '2.7' + ssh_transfer_method: + description: + - "Preferred method to use when transferring files over ssh" + - Setting to 'smart' (default) will try them in order, until one succeeds or they all fail + - For OpenSSH >=9.0 you must add an additional option to enable scp (scp_extra_args="-O") + - Using 'piped' creates an ssh pipe with C(dd) on either side to copy the data + choices: ['sftp', 'scp', 'piped', 'smart'] + env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}] + ini: + - {key: transfer_method, section: ssh_connection} + vars: + - name: ansible_ssh_transfer_method + version_added: '2.12' + scp_if_ssh: + deprecated: + why: In favor of the "ssh_transfer_method" option. + version: "2.17" + alternatives: ssh_transfer_method + default: smart + description: + - "Preferred method to use when transferring files over SSH." + - When set to I(smart), Ansible will try them until one succeeds or they all fail. + - If set to I(True), it will force 'scp', if I(False) it will use 'sftp'. + - For OpenSSH >=9.0 you must add an additional option to enable scp (scp_extra_args="-O") + - This setting will overridden by ssh_transfer_method if set. + env: [{name: ANSIBLE_SCP_IF_SSH}] + ini: + - {key: scp_if_ssh, section: ssh_connection} + vars: + - name: ansible_scp_if_ssh + version_added: '2.7' + use_tty: + version_added: '2.5' + default: 'yes' + description: add -tt to ssh commands to force tty allocation. + env: [{name: ANSIBLE_SSH_USETTY}] + ini: + - {key: usetty, section: ssh_connection} + type: bool + vars: + - name: ansible_ssh_use_tty + version_added: '2.7' + timeout: + default: 10 + description: + - This is the default amount of time we will wait while establishing an SSH connection. + - It also controls how long we can wait to access reading the connection once established (select on the socket). + env: + - name: ANSIBLE_TIMEOUT + - name: ANSIBLE_SSH_TIMEOUT + version_added: '2.11' + ini: + - key: timeout + section: defaults + - key: timeout + section: ssh_connection + version_added: '2.11' + vars: + - name: ansible_ssh_timeout + version_added: '2.11' + cli: + - name: timeout + type: integer + pkcs11_provider: + version_added: '2.12' + default: "" + description: + - "PKCS11 SmartCard provider such as opensc, example: /usr/local/lib/opensc-pkcs11.so" + - Requires sshpass version 1.06+, sshpass must support the -P option. 
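The 'smart' value of ssh_transfer_method resolves to an ordered fallback list that _file_transport_command (further down) tries one after another: sftp, then scp, then the dd-based piped copy, with piped dropped for Windows targets. A condensed sketch of that resolution:

    def resolve_transfer_methods(ssh_transfer_method='smart', is_windows=False):
        # Condensed from _file_transport_command below; sketch only.
        smart_methods = ['sftp', 'scp', 'piped']
        if is_windows:
            smart_methods.remove('piped')   # Windows has no dd for the piped copy
        if ssh_transfer_method == 'smart':
            return smart_methods
        return [ssh_transfer_method]

    assert resolve_transfer_methods() == ['sftp', 'scp', 'piped']
    assert resolve_transfer_methods(is_windows=True) == ['sftp', 'scp']
    assert resolve_transfer_methods('scp') == ['scp']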
+ env: [{name: ANSIBLE_PKCS11_PROVIDER}] + ini: + - {key: pkcs11_provider, section: ssh_connection} + vars: + - name: ansible_ssh_pkcs11_provider +''' + +import errno +import fcntl +import hashlib +import os +import pty +import re +import shlex +import subprocess +import time + +from functools import wraps +from ansible.errors import ( + AnsibleAuthenticationFailure, + AnsibleConnectionFailure, + AnsibleError, + AnsibleFileNotFound, +) +from ansible.errors import AnsibleOptionsError +from ansible.module_utils.compat import selectors +from ansible.module_utils.six import PY3, text_type, binary_type +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.plugins.shell.powershell import _parse_clixml +from ansible.utils.display import Display +from ansible.utils.path import unfrackpath, makedirs_safe + +display = Display() + +# error messages that indicate 255 return code is not from ssh itself. +b_NOT_SSH_ERRORS = (b'Traceback (most recent call last):', # Python-2.6 when there's an exception + # while invoking a script via -m + b'PHP Parse error:', # Php always returns with error + b'chmod: invalid mode', # chmod, but really only on AIX + b'chmod: A flag or octal number is not correct.', # chmod, other AIX + ) + +SSHPASS_AVAILABLE = None +SSH_DEBUG = re.compile(r'^debug\d+: .*') + + +class AnsibleControlPersistBrokenPipeError(AnsibleError): + ''' ControlPersist broken pipe ''' + pass + + +def _handle_error(remaining_retries, command, return_tuple, no_log, host, display=display): + + # sshpass errors + if command == b'sshpass': + # Error 5 is invalid/incorrect password. Raise an exception to prevent retries from locking the account. + if return_tuple[0] == 5: + msg = 'Invalid/incorrect username/password. Skipping remaining {0} retries to prevent account lockout:'.format(remaining_retries) + if remaining_retries <= 0: + msg = 'Invalid/incorrect password:' + if no_log: + msg = '{0} <error censored due to no log>'.format(msg) + else: + msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip()) + raise AnsibleAuthenticationFailure(msg) + + # sshpass returns codes are 1-6. We handle 5 previously, so this catches other scenarios. + # No exception is raised, so the connection is retried - except when attempting to use + # sshpass_prompt with an sshpass that won't let us pass -P, in which case we fail loudly. + elif return_tuple[0] in [1, 2, 3, 4, 6]: + msg = 'sshpass error:' + if no_log: + msg = '{0} <error censored due to no log>'.format(msg) + else: + details = to_native(return_tuple[2]).rstrip() + if "sshpass: invalid option -- 'P'" in details: + details = 'Installed sshpass version does not support customized password prompts. ' \ + 'Upgrade sshpass to use sshpass_prompt, or otherwise switch to ssh keys.' 
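The sshpass handling above boils down to a small exit-code table: 5 means an invalid password and is never retried (to avoid locking the account), the remaining sshpass codes 1 to 6 are reported and the connection is retried, and everything else is left to the ssh-specific handling. A compact sketch of that classification (the function name is local to this example):

    def classify_sshpass_rc(rc):
        # Sketch of the decision made in _handle_error for the sshpass wrapper.
        if rc == 5:
            return 'authentication-failure'   # wrong password: never retry
        if rc in (1, 2, 3, 4, 6):
            return 'sshpass-error'            # reported, connection is retried
        return 'not-an-sshpass-error'

    assert classify_sshpass_rc(5) == 'authentication-failure'
    assert classify_sshpass_rc(6) == 'sshpass-error'
    assert classify_sshpass_rc(0) == 'not-an-sshpass-error'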
+ raise AnsibleError('{0} {1}'.format(msg, details)) + msg = '{0} {1}'.format(msg, details) + + if return_tuple[0] == 255: + SSH_ERROR = True + for signature in b_NOT_SSH_ERRORS: + # 1 == stout, 2 == stderr + if signature in return_tuple[1] or signature in return_tuple[2]: + SSH_ERROR = False + break + + if SSH_ERROR: + msg = "Failed to connect to the host via ssh:" + if no_log: + msg = '{0} <error censored due to no log>'.format(msg) + else: + msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip()) + raise AnsibleConnectionFailure(msg) + + # For other errors, no exception is raised so the connection is retried and we only log the messages + if 1 <= return_tuple[0] <= 254: + msg = u"Failed to connect to the host via ssh:" + if no_log: + msg = u'{0} <error censored due to no log>'.format(msg) + else: + msg = u'{0} {1}'.format(msg, to_text(return_tuple[2]).rstrip()) + display.vvv(msg, host=host) + + +def _ssh_retry(func): + """ + Decorator to retry ssh/scp/sftp in the case of a connection failure + + Will retry if: + * an exception is caught + * ssh returns 255 + Will not retry if + * sshpass returns 5 (invalid password, to prevent account lockouts) + * remaining_tries is < 2 + * retries limit reached + """ + @wraps(func) + def wrapped(self, *args, **kwargs): + remaining_tries = int(self.get_option('reconnection_retries')) + 1 + cmd_summary = u"%s..." % to_text(args[0]) + conn_password = self.get_option('password') or self._play_context.password + for attempt in range(remaining_tries): + cmd = args[0] + if attempt != 0 and conn_password and isinstance(cmd, list): + # If this is a retry, the fd/pipe for sshpass is closed, and we need a new one + self.sshpass_pipe = os.pipe() + cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict') + + try: + try: + return_tuple = func(self, *args, **kwargs) + # TODO: this should come from task + if self._play_context.no_log: + display.vvv(u'rc=%s, stdout and stderr censored due to no log' % return_tuple[0], host=self.host) + else: + display.vvv(return_tuple, host=self.host) + # 0 = success + # 1-254 = remote command return code + # 255 could be a failure from the ssh command itself + except (AnsibleControlPersistBrokenPipeError): + # Retry one more time because of the ControlPersist broken pipe (see #16731) + cmd = args[0] + if conn_password and isinstance(cmd, list): + # This is a retry, so the fd/pipe for sshpass is closed, and we need a new one + self.sshpass_pipe = os.pipe() + cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict') + display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE") + return_tuple = func(self, *args, **kwargs) + + remaining_retries = remaining_tries - attempt - 1 + _handle_error(remaining_retries, cmd[0], return_tuple, self._play_context.no_log, self.host) + + break + + # 5 = Invalid/incorrect password from sshpass + except AnsibleAuthenticationFailure: + # Raising this exception, which is subclassed from AnsibleConnectionFailure, prevents further retries + raise + + except (AnsibleConnectionFailure, Exception) as e: + + if attempt == remaining_tries - 1: + raise + else: + pause = 2 ** attempt - 1 + if pause > 30: + pause = 30 + + if isinstance(e, AnsibleConnectionFailure): + msg = u"ssh_retry: attempt: %d, ssh return code is 255. 
cmd (%s), pausing for %d seconds" % (attempt + 1, cmd_summary, pause) + else: + msg = (u"ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), " + u"pausing for %d seconds" % (attempt + 1, to_text(e), cmd_summary, pause)) + + display.vv(msg, host=self.host) + + time.sleep(pause) + continue + + return return_tuple + return wrapped + + +class Connection(ConnectionBase): + ''' ssh based connections ''' + + transport = 'ssh' + has_pipelining = True + + def __init__(self, *args, **kwargs): + super(Connection, self).__init__(*args, **kwargs) + + # TODO: all should come from get_option(), but not might be set at this point yet + self.host = self._play_context.remote_addr + self.port = self._play_context.port + self.user = self._play_context.remote_user + self.control_path = None + self.control_path_dir = None + + # Windows operates differently from a POSIX connection/shell plugin, + # we need to set various properties to ensure SSH on Windows continues + # to work + if getattr(self._shell, "_IS_WINDOWS", False): + self.has_native_async = True + self.always_pipeline_modules = True + self.module_implementation_preferences = ('.ps1', '.exe', '') + self.allow_executable = False + + # The connection is created by running ssh/scp/sftp from the exec_command, + # put_file, and fetch_file methods, so we don't need to do any connection + # management here. + + def _connect(self): + return self + + @staticmethod + def _create_control_path(host, port, user, connection=None, pid=None): + '''Make a hash for the controlpath based on con attributes''' + pstring = '%s-%s-%s' % (host, port, user) + if connection: + pstring += '-%s' % connection + if pid: + pstring += '-%s' % to_text(pid) + m = hashlib.sha1() + m.update(to_bytes(pstring)) + digest = m.hexdigest() + cpath = '%(directory)s/' + digest[:10] + return cpath + + @staticmethod + def _sshpass_available(): + global SSHPASS_AVAILABLE + + # We test once if sshpass is available, and remember the result. It + # would be nice to use distutils.spawn.find_executable for this, but + # distutils isn't always available; shutils.which() is Python3-only. + + if SSHPASS_AVAILABLE is None: + try: + p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p.communicate() + SSHPASS_AVAILABLE = True + except OSError: + SSHPASS_AVAILABLE = False + + return SSHPASS_AVAILABLE + + @staticmethod + def _persistence_controls(b_command): + ''' + Takes a command array and scans it for ControlPersist and ControlPath + settings and returns two booleans indicating whether either was found. + This could be smarter, e.g. returning false if ControlPersist is 'no', + but for now we do it simple way. + ''' + + controlpersist = False + controlpath = False + + for b_arg in (a.lower() for a in b_command): + if b'controlpersist' in b_arg: + controlpersist = True + elif b'controlpath' in b_arg: + controlpath = True + + return controlpersist, controlpath + + def _add_args(self, b_command, b_args, explanation): + """ + Adds arguments to the ssh command and displays a caller-supplied explanation of why. + + :arg b_command: A list containing the command to add the new arguments to. + This list will be modified by this method. + :arg b_args: An iterable of new arguments to add. This iterable is used + more than once so it must be persistent (ie: a list is okay but a + StringIO would not) + :arg explanation: A text string containing explaining why the arguments + were added. It will be displayed with a high enough verbosity. + .. 
note:: This function does its work via side-effect. The b_command list has the new arguments appended. + """ + display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self.host) + b_command += b_args + + def _build_command(self, binary, subsystem, *other_args): + ''' + Takes a executable (ssh, scp, sftp or wrapper) and optional extra arguments and returns the remote command + wrapped in local ssh shell commands and ready for execution. + + :arg binary: actual executable to use to execute command. + :arg subsystem: type of executable provided, ssh/sftp/scp, needed because wrappers for ssh might have diff names. + :arg other_args: dict of, value pairs passed as arguments to the ssh binary + + ''' + + b_command = [] + conn_password = self.get_option('password') or self._play_context.password + + # + # First, the command to invoke + # + + # If we want to use password authentication, we have to set up a pipe to + # write the password to sshpass. + pkcs11_provider = self.get_option("pkcs11_provider") + if conn_password or pkcs11_provider: + if not self._sshpass_available(): + raise AnsibleError("to use the 'ssh' connection type with passwords or pkcs11_provider, you must install the sshpass program") + if not conn_password and pkcs11_provider: + raise AnsibleError("to use pkcs11_provider you must specify a password/pin") + + self.sshpass_pipe = os.pipe() + b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')] + + password_prompt = self.get_option('sshpass_prompt') + if not password_prompt and pkcs11_provider: + # Set default password prompt for pkcs11_provider to make it clear its a PIN + password_prompt = 'Enter PIN for ' + + if password_prompt: + b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')] + + b_command += [to_bytes(binary, errors='surrogate_or_strict')] + + # + # Next, additional arguments based on the configuration. + # + + # pkcs11 mode allows the use of Smartcards or Yubikey devices + if conn_password and pkcs11_provider: + self._add_args(b_command, + (b"-o", b"KbdInteractiveAuthentication=no", + b"-o", b"PreferredAuthentications=publickey", + b"-o", b"PasswordAuthentication=no", + b'-o', to_bytes(u'PKCS11Provider=%s' % pkcs11_provider)), + u'Enable pkcs11') + + # sftp batch mode allows us to correctly catch failed transfers, but can + # be disabled if the client side doesn't support the option. However, + # sftp batch mode does not prompt for passwords so it must be disabled + # if not using controlpersist and using sshpass + if subsystem == 'sftp' and self.get_option('sftp_batch_mode'): + if conn_password: + b_args = [b'-o', b'BatchMode=no'] + self._add_args(b_command, b_args, u'disable batch mode for sshpass') + b_command += [b'-b', b'-'] + + if display.verbosity > 3: + b_command.append(b'-vvv') + + # Next, we add ssh_args + ssh_args = self.get_option('ssh_args') + if ssh_args: + b_args = [to_bytes(a, errors='surrogate_or_strict') for a in + self._split_ssh_args(ssh_args)] + self._add_args(b_command, b_args, u"ansible.cfg set ssh_args") + + # Now we add various arguments that have their own specific settings defined in docs above. 
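The password never appears on the command line: a pipe is created, the read end's file descriptor number is handed to sshpass via -d, and the secret is written to the write end after the child has started. A minimal sketch of that pattern, using a short Python child in place of sshpass so it runs without sshpass installed:

    import os
    import subprocess

    read_fd, write_fd = os.pipe()

    # Stand-in for: ['sshpass', '-d%d' % read_fd, 'ssh', ...]; pass_fds keeps
    # the read end open in the child (Python 3 closes inherited fds by default).
    child = subprocess.Popen(
        ['python3', '-c',
         'import os, sys; print(os.read(int(sys.argv[1]), 1024))',
         str(read_fd)],
        pass_fds=(read_fd,),
    )

    os.close(read_fd)                    # parent no longer needs the read end
    os.write(write_fd, b'secret\n')      # hand the secret to the child
    os.close(write_fd)
    child.wait()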
+ if self.get_option('host_key_checking') is False: + b_args = (b"-o", b"StrictHostKeyChecking=no") + self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled") + + self.port = self.get_option('port') + if self.port is not None: + b_args = (b"-o", b"Port=" + to_bytes(self.port, nonstring='simplerepr', errors='surrogate_or_strict')) + self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set") + + key = self.get_option('private_key_file') + if key: + b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"') + self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set") + + if not conn_password: + self._add_args( + b_command, ( + b"-o", b"KbdInteractiveAuthentication=no", + b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey", + b"-o", b"PasswordAuthentication=no" + ), + u"ansible_password/ansible_ssh_password not set" + ) + + self.user = self.get_option('remote_user') + if self.user: + self._add_args( + b_command, + (b"-o", b'User="%s"' % to_bytes(self.user, errors='surrogate_or_strict')), + u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set" + ) + + timeout = self.get_option('timeout') + self._add_args( + b_command, + (b"-o", b"ConnectTimeout=" + to_bytes(timeout, errors='surrogate_or_strict', nonstring='simplerepr')), + u"ANSIBLE_TIMEOUT/timeout set" + ) + + # Add in any common or binary-specific arguments from the PlayContext + # (i.e. inventory or task settings or overrides on the command line). + + for opt in (u'ssh_common_args', u'{0}_extra_args'.format(subsystem)): + attr = self.get_option(opt) + if attr is not None: + b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)] + self._add_args(b_command, b_args, u"Set %s" % opt) + + # Check if ControlPersist is enabled and add a ControlPath if one hasn't + # already been set. + + controlpersist, controlpath = self._persistence_controls(b_command) + + if controlpersist: + self._persistent = True + + if not controlpath: + self.control_path_dir = self.get_option('control_path_dir') + cpdir = unfrackpath(self.control_path_dir) + b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict') + + # The directory must exist and be writable. + makedirs_safe(b_cpdir, 0o700) + if not os.access(b_cpdir, os.W_OK): + raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir)) + + self.control_path = self.get_option('control_path') + if not self.control_path: + self.control_path = self._create_control_path( + self.host, + self.port, + self.user + ) + b_args = (b"-o", b'ControlPath="%s"' % to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict')) + self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath") + + # Finally, we add any caller-supplied extras. + if other_args: + b_command += [to_bytes(a) for a in other_args] + + return b_command + + def _send_initial_data(self, fh, in_data, ssh_process): + ''' + Writes initial data to the stdin filehandle of the subprocess and closes + it. (The handle must be closed; otherwise, for example, "sftp -b -" will + just hang forever waiting for more commands.) 
+ ''' + + display.debug(u'Sending initial data') + + try: + fh.write(to_bytes(in_data)) + fh.close() + except (OSError, IOError) as e: + # The ssh connection may have already terminated at this point, with a more useful error + # Only raise AnsibleConnectionFailure if the ssh process is still alive + time.sleep(0.001) + ssh_process.poll() + if getattr(ssh_process, 'returncode', None) is None: + raise AnsibleConnectionFailure( + 'Data could not be sent to remote host "%s". Make sure this host can be reached ' + 'over ssh: %s' % (self.host, to_native(e)), orig_exc=e + ) + + display.debug(u'Sent initial data (%d bytes)' % len(in_data)) + + # Used by _run() to kill processes on failures + @staticmethod + def _terminate_process(p): + """ Terminate a process, ignoring errors """ + try: + p.terminate() + except (OSError, IOError): + pass + + # This is separate from _run() because we need to do the same thing for stdout + # and stderr. + def _examine_output(self, source, state, b_chunk, sudoable): + ''' + Takes a string, extracts complete lines from it, tests to see if they + are a prompt, error message, etc., and sets appropriate flags in self. + Prompt and success lines are removed. + + Returns the processed (i.e. possibly-edited) output and the unprocessed + remainder (to be processed with the next chunk) as strings. + ''' + + output = [] + for b_line in b_chunk.splitlines(True): + display_line = to_text(b_line).rstrip('\r\n') + suppress_output = False + + # display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line)) + if SSH_DEBUG.match(display_line): + # skip lines from ssh debug output to avoid false matches + pass + elif self.become.expect_prompt() and self.become.check_password_prompt(b_line): + display.debug(u"become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line)) + self._flags['become_prompt'] = True + suppress_output = True + elif self.become.success and self.become.check_success(b_line): + display.debug(u"become_success: (source=%s, state=%s): '%s'" % (source, state, display_line)) + self._flags['become_success'] = True + suppress_output = True + elif sudoable and self.become.check_incorrect_password(b_line): + display.debug(u"become_error: (source=%s, state=%s): '%s'" % (source, state, display_line)) + self._flags['become_error'] = True + elif sudoable and self.become.check_missing_password(b_line): + display.debug(u"become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line)) + self._flags['become_nopasswd_error'] = True + + if not suppress_output: + output.append(b_line) + + # The chunk we read was most likely a series of complete lines, but just + # in case the last line was incomplete (and not a prompt, which we would + # have removed from the output), we retain it to be processed with the + # next chunk. + + remainder = b'' + if output and not output[-1].endswith(b'\n'): + remainder = output[-1] + output = output[:-1] + + return b''.join(output), remainder + + def _bare_run(self, cmd, in_data, sudoable=True, checkrc=True): + ''' + Starts the command and communicates with it until it ends. + ''' + + # We don't use _shell.quote as this is run on the controller and independent from the shell plugin chosen + display_cmd = u' '.join(shlex.quote(to_text(c)) for c in cmd) + display.vvv(u'SSH: EXEC {0}'.format(display_cmd), host=self.host) + + # Start the given command. If we don't need to pipeline data, we can try + # to use a pseudo-tty (ssh will have been invoked with -tt). 
If we are + # pipelining data, or can't create a pty, we fall back to using plain + # old pipes. + + p = None + + if isinstance(cmd, (text_type, binary_type)): + cmd = to_bytes(cmd) + else: + cmd = list(map(to_bytes, cmd)) + + conn_password = self.get_option('password') or self._play_context.password + + if not in_data: + try: + # Make sure stdin is a proper pty to avoid tcgetattr errors + master, slave = pty.openpty() + if PY3 and conn_password: + # pylint: disable=unexpected-keyword-arg + p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe) + else: + p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdin = os.fdopen(master, 'wb', 0) + os.close(slave) + except (OSError, IOError): + p = None + + if not p: + try: + if PY3 and conn_password: + # pylint: disable=unexpected-keyword-arg + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe) + else: + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdin = p.stdin + except (OSError, IOError) as e: + raise AnsibleError('Unable to execute ssh command line on a controller due to: %s' % to_native(e)) + + # If we are using SSH password authentication, write the password into + # the pipe we opened in _build_command. + + if conn_password: + os.close(self.sshpass_pipe[0]) + try: + os.write(self.sshpass_pipe[1], to_bytes(conn_password) + b'\n') + except OSError as e: + # Ignore broken pipe errors if the sshpass process has exited. + if e.errno != errno.EPIPE or p.poll() is None: + raise + os.close(self.sshpass_pipe[1]) + + # + # SSH state machine + # + + # Now we read and accumulate output from the running process until it + # exits. Depending on the circumstances, we may also need to write an + # escalation password and/or pipelined input to the process. + + states = [ + 'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit' + ] + + # Are we requesting privilege escalation? Right now, we may be invoked + # to execute sftp/scp with sudoable=True, but we can request escalation + # only when using ssh. Otherwise we can send initial data straightaway. + + state = states.index('ready_to_send') + if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable: + prompt = getattr(self.become, 'prompt', None) + if prompt: + # We're requesting escalation with a password, so we have to + # wait for a password prompt. + state = states.index('awaiting_prompt') + display.debug(u'Initial state: %s: %s' % (states[state], to_text(prompt))) + elif self.become and self.become.success: + # We're requesting escalation without a password, so we have to + # detect success/failure before sending any initial data. + state = states.index('awaiting_escalation') + display.debug(u'Initial state: %s: %s' % (states[state], to_text(self.become.success))) + + # We store accumulated stdout and stderr output from the process here, + # but strip any privilege escalation prompt/confirmation lines first. + # Output is accumulated into tmp_*, complete lines are extracted into + # an array, then checked and removed or copied to stdout or stderr. We + # set any flags based on examining the output in self._flags. 
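Just below, both of the child's pipes are switched to non-blocking mode and watched with a selector, so a slow or silent stream never stalls the loop. A stripped-down sketch of that read pattern, with a trivial shell command standing in for ssh:

    import fcntl
    import os
    import selectors
    import subprocess

    p = subprocess.Popen(['sh', '-c', 'echo out; echo err >&2'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    for fd in (p.stdout, p.stderr):
        # Non-blocking reads: read() returns whatever is buffered right now.
        fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

    sel = selectors.DefaultSelector()
    sel.register(p.stdout, selectors.EVENT_READ)
    sel.register(p.stderr, selectors.EVENT_READ)

    stdout, stderr = b'', b''
    while sel.get_map():
        for key, _ in sel.select(timeout=2):
            chunk = key.fileobj.read()
            if not chunk:                       # EOF: stop watching this stream
                sel.unregister(key.fileobj)
            elif key.fileobj is p.stdout:
                stdout += chunk
            else:
                stderr += chunk
    sel.close()
    p.wait()
    print(stdout, stderr)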
+ + b_stdout = b_stderr = b'' + b_tmp_stdout = b_tmp_stderr = b'' + + self._flags = dict( + become_prompt=False, become_success=False, + become_error=False, become_nopasswd_error=False + ) + + # select timeout should be longer than the connect timeout, otherwise + # they will race each other when we can't connect, and the connect + # timeout usually fails + timeout = 2 + self.get_option('timeout') + for fd in (p.stdout, p.stderr): + fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK) + + # TODO: bcoca would like to use SelectSelector() when open + # select is faster when filehandles is low and we only ever handle 1. + selector = selectors.DefaultSelector() + selector.register(p.stdout, selectors.EVENT_READ) + selector.register(p.stderr, selectors.EVENT_READ) + + # If we can send initial data without waiting for anything, we do so + # before we start polling + if states[state] == 'ready_to_send' and in_data: + self._send_initial_data(stdin, in_data, p) + state += 1 + + try: + while True: + poll = p.poll() + events = selector.select(timeout) + + # We pay attention to timeouts only while negotiating a prompt. + + if not events: + # We timed out + if state <= states.index('awaiting_escalation'): + # If the process has already exited, then it's not really a + # timeout; we'll let the normal error handling deal with it. + if poll is not None: + break + self._terminate_process(p) + raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout))) + + # Read whatever output is available on stdout and stderr, and stop + # listening to the pipe if it's been closed. + + for key, event in events: + if key.fileobj == p.stdout: + b_chunk = p.stdout.read() + if b_chunk == b'': + # stdout has been closed, stop watching it + selector.unregister(p.stdout) + # When ssh has ControlMaster (+ControlPath/Persist) enabled, the + # first connection goes into the background and we never see EOF + # on stderr. If we see EOF on stdout, lower the select timeout + # to reduce the time wasted selecting on stderr if we observe + # that the process has not yet existed after this EOF. Otherwise + # we may spend a long timeout period waiting for an EOF that is + # not going to arrive until the persisted connection closes. + timeout = 1 + b_tmp_stdout += b_chunk + display.debug(u"stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk))) + elif key.fileobj == p.stderr: + b_chunk = p.stderr.read() + if b_chunk == b'': + # stderr has been closed, stop watching it + selector.unregister(p.stderr) + b_tmp_stderr += b_chunk + display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk))) + + # We examine the output line-by-line until we have negotiated any + # privilege escalation prompt and subsequent success/error message. + # Afterwards, we can accumulate output without looking at it. + + if state < states.index('ready_to_send'): + if b_tmp_stdout: + b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable) + b_stdout += b_output + b_tmp_stdout = b_unprocessed + + if b_tmp_stderr: + b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable) + b_stderr += b_output + b_tmp_stderr = b_unprocessed + else: + b_stdout += b_tmp_stdout + b_stderr += b_tmp_stderr + b_tmp_stdout = b_tmp_stderr = b'' + + # If we see a privilege escalation prompt, we send the password. 
+ # (If we're expecting a prompt but the escalation succeeds, we + # didn't need the password and can carry on regardless.) + + if states[state] == 'awaiting_prompt': + if self._flags['become_prompt']: + display.debug(u'Sending become_password in response to prompt') + become_pass = self.become.get_option('become_pass', playcontext=self._play_context) + stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n') + # On python3 stdin is a BufferedWriter, and we don't have a guarantee + # that the write will happen without a flush + stdin.flush() + self._flags['become_prompt'] = False + state += 1 + elif self._flags['become_success']: + state += 1 + + # We've requested escalation (with or without a password), now we + # wait for an error message or a successful escalation. + + if states[state] == 'awaiting_escalation': + if self._flags['become_success']: + display.vvv(u'Escalation succeeded') + self._flags['become_success'] = False + state += 1 + elif self._flags['become_error']: + display.vvv(u'Escalation failed') + self._terminate_process(p) + self._flags['become_error'] = False + raise AnsibleError('Incorrect %s password' % self.become.name) + elif self._flags['become_nopasswd_error']: + display.vvv(u'Escalation requires password') + self._terminate_process(p) + self._flags['become_nopasswd_error'] = False + raise AnsibleError('Missing %s password' % self.become.name) + elif self._flags['become_prompt']: + # This shouldn't happen, because we should see the "Sorry, + # try again" message first. + display.vvv(u'Escalation prompt repeated') + self._terminate_process(p) + self._flags['become_prompt'] = False + raise AnsibleError('Incorrect %s password' % self.become.name) + + # Once we're sure that the privilege escalation prompt, if any, has + # been dealt with, we can send any initial data and start waiting + # for output. + + if states[state] == 'ready_to_send': + if in_data: + self._send_initial_data(stdin, in_data, p) + state += 1 + + # Now we're awaiting_exit: has the child process exited? If it has, + # and we've read all available output from it, we're done. + + if poll is not None: + if not selector.get_map() or not events: + break + # We should not see further writes to the stdout/stderr file + # descriptors after the process has closed, set the select + # timeout to gather any last writes we may have missed. + timeout = 0 + continue + + # If the process has not yet exited, but we've already read EOF from + # its stdout and stderr (and thus no longer watching any file + # descriptors), we can just wait for it to exit. + + elif not selector.get_map(): + p.wait() + break + + # Otherwise there may still be outstanding data to read. + finally: + selector.close() + # close stdin, stdout, and stderr after process is terminated and + # stdout/stderr are read completely (see also issues #848, #64768). + stdin.close() + p.stdout.close() + p.stderr.close() + + if self.get_option('host_key_checking'): + if cmd[0] == b"sshpass" and p.returncode == 6: + raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support ' + 'this. 
Please add this host\'s fingerprint to your known_hosts file to manage this host.') + + controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr + if p.returncode != 0 and controlpersisterror: + raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" ' + '(or ssh_args in [ssh_connection] section of the config file) before running again') + + # If we find a broken pipe because of ControlPersist timeout expiring (see #16731), + # we raise a special exception so that we can retry a connection. + controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr + if p.returncode == 255: + + additional = to_native(b_stderr) + if controlpersist_broken_pipe: + raise AnsibleControlPersistBrokenPipeError('Data could not be sent because of ControlPersist broken pipe: %s' % additional) + + elif in_data and checkrc: + raise AnsibleConnectionFailure('Data could not be sent to remote host "%s". Make sure this host can be reached over ssh: %s' + % (self.host, additional)) + + return (p.returncode, b_stdout, b_stderr) + + @_ssh_retry + def _run(self, cmd, in_data, sudoable=True, checkrc=True): + """Wrapper around _bare_run that retries the connection + """ + return self._bare_run(cmd, in_data, sudoable=sudoable, checkrc=checkrc) + + @_ssh_retry + def _file_transport_command(self, in_path, out_path, sftp_action): + # scp and sftp require square brackets for IPv6 addresses, but + # accept them for hostnames and IPv4 addresses too. + host = '[%s]' % self.host + + smart_methods = ['sftp', 'scp', 'piped'] + + # Windows does not support dd so we cannot use the piped method + if getattr(self._shell, "_IS_WINDOWS", False): + smart_methods.remove('piped') + + # Transfer methods to try + methods = [] + + # Use the transfer_method option if set, otherwise use scp_if_ssh + ssh_transfer_method = self.get_option('ssh_transfer_method') + scp_if_ssh = self.get_option('scp_if_ssh') + if ssh_transfer_method is None and scp_if_ssh == 'smart': + ssh_transfer_method = 'smart' + + if ssh_transfer_method is not None: + if ssh_transfer_method == 'smart': + methods = smart_methods + else: + methods = [ssh_transfer_method] + else: + # since this can be a non-bool now, we need to handle it correctly + if not isinstance(scp_if_ssh, bool): + scp_if_ssh = scp_if_ssh.lower() + if scp_if_ssh in BOOLEANS: + scp_if_ssh = boolean(scp_if_ssh, strict=False) + elif scp_if_ssh != 'smart': + raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]') + if scp_if_ssh == 'smart': + methods = smart_methods + elif scp_if_ssh is True: + methods = ['scp'] + else: + methods = ['sftp'] + + for method in methods: + returncode = stdout = stderr = None + if method == 'sftp': + cmd = self._build_command(self.get_option('sftp_executable'), 'sftp', to_bytes(host)) + in_data = u"{0} {1} {2}\n".format(sftp_action, shlex.quote(in_path), shlex.quote(out_path)) + in_data = to_bytes(in_data, nonstring='passthru') + (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False) + elif method == 'scp': + scp = self.get_option('scp_executable') + + if sftp_action == 'get': + cmd = self._build_command(scp, 'scp', u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path) + else: + cmd = self._build_command(scp, 'scp', in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path))) + in_data = None + (returncode, stdout, stderr) = self._bare_run(cmd, in_data, 
checkrc=False) + elif method == 'piped': + if sftp_action == 'get': + # we pass sudoable=False to disable pty allocation, which + # would end up mixing stdout/stderr and screwing with newlines + (returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False) + with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: + out_file.write(stdout) + else: + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as f: + in_data = to_bytes(f.read(), nonstring='passthru') + if not in_data: + count = ' count=0' + else: + count = '' + (returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), in_data=in_data, sudoable=False) + + # Check the return code and rollover to next method if failed + if returncode == 0: + return (returncode, stdout, stderr) + else: + # If not in smart mode, the data will be printed by the raise below + if len(methods) > 1: + display.warning(u'%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host)) + display.debug(u'%s' % to_text(stdout)) + display.debug(u'%s' % to_text(stderr)) + + if returncode == 255: + raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr))) + else: + raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" % + (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr))) + + def _escape_win_path(self, path): + """ converts a Windows path to one that's supported by SFTP and SCP """ + # If using a root path then we need to start with / + prefix = "" + if re.match(r'^\w{1}:', path): + prefix = "/" + + # Convert all '\' to '/' + return "%s%s" % (prefix, path.replace("\\", "/")) + + # + # Main public methods + # + def exec_command(self, cmd, in_data=None, sudoable=True): + ''' run a command on the remote host ''' + + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + self.host = self.get_option('host') or self._play_context.remote_addr + + display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self.user), host=self.host) + + if getattr(self._shell, "_IS_WINDOWS", False): + # Become method 'runas' is done in the wrapper that is executed, + # need to disable sudoable so the bare_run is not waiting for a + # prompt that will not occur + sudoable = False + + # Make sure our first command is to set the console encoding to + # utf-8, this must be done via chcp to get utf-8 (65001) + cmd_parts = ["chcp.com", "65001", self._shell._SHELL_REDIRECT_ALLNULL, self._shell._SHELL_AND] + cmd_parts.extend(self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)) + cmd = ' '.join(cmd_parts) + + # we can only use tty when we are not pipelining the modules. piping + # data into /usr/bin/python inside a tty automatically invokes the + # python interactive-mode but the modules are not compatible with the + # interactive-mode ("unexpected indent" mainly because of empty lines) + + ssh_executable = self.get_option('ssh_executable') + + # -tt can cause various issues in some environments so allow the user + # to disable it as a troubleshooting method. 
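+ # Only force a tty (-tt) when no data is being piped in, privilege escalation may need to prompt, and the user has not disabled use_tty.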
+ use_tty = self.get_option('use_tty') + + if not in_data and sudoable and use_tty: + args = ('-tt', self.host, cmd) + else: + args = (self.host, cmd) + + cmd = self._build_command(ssh_executable, 'ssh', *args) + (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable) + + # When running on Windows, stderr may contain CLIXML encoded output + if getattr(self._shell, "_IS_WINDOWS", False) and stderr.startswith(b"#< CLIXML"): + stderr = _parse_clixml(stderr) + + return (returncode, stdout, stderr) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to remote ''' + + super(Connection, self).put_file(in_path, out_path) + + self.host = self.get_option('host') or self._play_context.remote_addr + + display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host) + if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path))) + + if getattr(self._shell, "_IS_WINDOWS", False): + out_path = self._escape_win_path(out_path) + + return self._file_transport_command(in_path, out_path, 'put') + + def fetch_file(self, in_path, out_path): + ''' fetch a file from remote to local ''' + + super(Connection, self).fetch_file(in_path, out_path) + + self.host = self.get_option('host') or self._play_context.remote_addr + + display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host) + + # need to add / if path is rooted + if getattr(self._shell, "_IS_WINDOWS", False): + in_path = self._escape_win_path(in_path) + + return self._file_transport_command(in_path, out_path, 'get') + + def reset(self): + + run_reset = False + self.host = self.get_option('host') or self._play_context.remote_addr + + # If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening. + # only run the reset if the ControlPath already exists or if it isn't configured and ControlPersist is set + # 'check' will determine this. 
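+ # 'ssh -O check' asks the ControlMaster (if any) for its status; a non-zero exit means no master is listening on the ControlPath, so there is nothing to stop.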
+ cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'check', self.host) + display.vvv(u'sending connection check: %s' % to_text(cmd)) + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + status_code = p.wait() + if status_code != 0: + display.vvv(u"No connection to reset: %s" % to_text(stderr)) + else: + run_reset = True + + if run_reset: + cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'stop', self.host) + display.vvv(u'sending connection stop: %s' % to_text(cmd)) + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + status_code = p.wait() + if status_code != 0: + display.warning(u"Failed to reset connection:%s" % to_text(stderr)) + + self.close() + + def close(self): + self._connected = False diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py new file mode 100644 index 0000000..13c80ec --- /dev/null +++ b/lib/ansible/plugins/connection/winrm.py @@ -0,0 +1,755 @@ +# (c) 2014, Chris Church <chris@ninemoreminutes.com> +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ + author: Ansible Core Team + name: winrm + short_description: Run tasks over Microsoft's WinRM + description: + - Run commands or put/fetch on a target via WinRM + - This plugin allows extra arguments to be passed that are supported by the protocol but not explicitly defined here. + They should take the form of variables declared with the following pattern C(ansible_winrm_<option>). + version_added: "2.0" + extends_documentation_fragment: + - connection_pipelining + requirements: + - pywinrm (python library) + options: + # figure out more elegant 'delegation' + remote_addr: + description: + - Address of the windows machine + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_winrm_host + type: str + remote_user: + description: + - The user to log in as to the Windows machine + vars: + - name: ansible_user + - name: ansible_winrm_user + keyword: + - name: remote_user + type: str + remote_password: + description: Authentication password for the C(remote_user). Can be supplied as CLI option. + vars: + - name: ansible_password + - name: ansible_winrm_pass + - name: ansible_winrm_password + type: str + aliases: + - password # Needed for --ask-pass to come through on delegation + port: + description: + - port for winrm to connect on remote target + - The default is the https (5986) port, if using http it should be 5985 + vars: + - name: ansible_port + - name: ansible_winrm_port + default: 5986 + keyword: + - name: port + type: integer + scheme: + description: + - URI scheme to use + - If not set, then will default to C(https) or C(http) if I(port) is + C(5985). 
+ choices: [http, https] + vars: + - name: ansible_winrm_scheme + type: str + path: + description: URI path to connect to + default: '/wsman' + vars: + - name: ansible_winrm_path + type: str + transport: + description: + - List of winrm transports to attempt to use (ssl, plaintext, kerberos, etc) + - If None (the default) the plugin will try to automatically guess the correct list + - The choices available depend on your version of pywinrm + type: list + elements: string + vars: + - name: ansible_winrm_transport + kerberos_command: + description: kerberos command to use to request a authentication ticket + default: kinit + vars: + - name: ansible_winrm_kinit_cmd + type: str + kinit_args: + description: + - Extra arguments to pass to C(kinit) when getting the Kerberos authentication ticket. + - By default no extra arguments are passed into C(kinit) unless I(ansible_winrm_kerberos_delegation) is also + set. In that case C(-f) is added to the C(kinit) args so a forwardable ticket is retrieved. + - If set, the args will overwrite any existing defaults for C(kinit), including C(-f) for a delegated ticket. + type: str + vars: + - name: ansible_winrm_kinit_args + version_added: '2.11' + kinit_env_vars: + description: + - A list of environment variables to pass through to C(kinit) when getting the Kerberos authentication ticket. + - By default no environment variables are passed through and C(kinit) is run with a blank slate. + - The environment variable C(KRB5CCNAME) cannot be specified here as it's used to store the temp Kerberos + ticket used by WinRM. + type: list + elements: str + default: [] + ini: + - section: winrm + key: kinit_env_vars + vars: + - name: ansible_winrm_kinit_env_vars + version_added: '2.12' + kerberos_mode: + description: + - kerberos usage mode. + - The managed option means Ansible will obtain kerberos ticket. + - While the manual one means a ticket must already have been obtained by the user. + - If having issues with Ansible freezing when trying to obtain the + Kerberos ticket, you can either set this to C(manual) and obtain + it outside Ansible or install C(pexpect) through pip and try + again. + choices: [managed, manual] + vars: + - name: ansible_winrm_kinit_mode + type: str + connection_timeout: + description: + - Sets the operation and read timeout settings for the WinRM + connection. + - Corresponds to the C(operation_timeout_sec) and + C(read_timeout_sec) args in pywinrm so avoid setting these vars + with this one. + - The default value is whatever is set in the installed version of + pywinrm. 
+ vars: + - name: ansible_winrm_connection_timeout + type: int +""" + +import base64 +import logging +import os +import re +import traceback +import json +import tempfile +import shlex +import subprocess + +from inspect import getfullargspec +from urllib.parse import urlunsplit + +HAVE_KERBEROS = False +try: + import kerberos + HAVE_KERBEROS = True +except ImportError: + pass + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleConnectionFailure +from ansible.errors import AnsibleFileNotFound +from ansible.module_utils.json_utils import _filter_non_json_lines +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.six import binary_type +from ansible.plugins.connection import ConnectionBase +from ansible.plugins.shell.powershell import _parse_clixml +from ansible.utils.hashing import secure_hash +from ansible.utils.display import Display + + +try: + import winrm + from winrm import Response + from winrm.protocol import Protocol + import requests.exceptions + HAS_WINRM = True + WINRM_IMPORT_ERR = None +except ImportError as e: + HAS_WINRM = False + WINRM_IMPORT_ERR = e + +try: + import xmltodict + HAS_XMLTODICT = True + XMLTODICT_IMPORT_ERR = None +except ImportError as e: + HAS_XMLTODICT = False + XMLTODICT_IMPORT_ERR = e + +HAS_PEXPECT = False +try: + import pexpect + # echo was added in pexpect 3.3+ which is newer than the RHEL package + # we can only use pexpect for kerb auth if echo is a valid kwarg + # https://github.com/ansible/ansible/issues/43462 + if hasattr(pexpect, 'spawn'): + argspec = getfullargspec(pexpect.spawn.__init__) + if 'echo' in argspec.args: + HAS_PEXPECT = True +except ImportError as e: + pass + +# used to try and parse the hostname and detect if IPv6 is being used +try: + import ipaddress + HAS_IPADDRESS = True +except ImportError: + HAS_IPADDRESS = False + +display = Display() + + +class Connection(ConnectionBase): + '''WinRM connections over HTTP/HTTPS.''' + + transport = 'winrm' + module_implementation_preferences = ('.ps1', '.exe', '') + allow_executable = False + has_pipelining = True + allow_extras = True + + def __init__(self, *args, **kwargs): + + self.always_pipeline_modules = True + self.has_native_async = True + + self.protocol = None + self.shell_id = None + self.delegate = None + self._shell_type = 'powershell' + + super(Connection, self).__init__(*args, **kwargs) + + if not C.DEFAULT_DEBUG: + logging.getLogger('requests_credssp').setLevel(logging.INFO) + logging.getLogger('requests_kerberos').setLevel(logging.INFO) + logging.getLogger('urllib3').setLevel(logging.INFO) + + def _build_winrm_kwargs(self): + # this used to be in set_options, as win_reboot needs to be able to + # override the conn timeout, we need to be able to build the args + # after setting individual options. 
This is called by _connect before + # starting the WinRM connection + self._winrm_host = self.get_option('remote_addr') + self._winrm_user = self.get_option('remote_user') + self._winrm_pass = self.get_option('remote_password') + + self._winrm_port = self.get_option('port') + + self._winrm_scheme = self.get_option('scheme') + # old behaviour, scheme should default to http if not set and the port + # is 5985 otherwise https + if self._winrm_scheme is None: + self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https' + + self._winrm_path = self.get_option('path') + self._kinit_cmd = self.get_option('kerberos_command') + self._winrm_transport = self.get_option('transport') + self._winrm_connection_timeout = self.get_option('connection_timeout') + + if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'): + self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES) + else: + # for legacy versions of pywinrm, use the values we know are supported + self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos']) + + # calculate transport if needed + if self._winrm_transport is None or self._winrm_transport[0] is None: + # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic + transport_selector = ['ssl'] if self._winrm_scheme == 'https' else ['plaintext'] + + if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)): + self._winrm_transport = ['kerberos'] + transport_selector + else: + self._winrm_transport = transport_selector + + unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes) + + if unsupported_transports: + raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % + to_native(list(unsupported_transports), nonstring='simplerepr')) + + # if kerberos is among our transports and there's a password specified, we're managing the tickets + kinit_mode = self.get_option('kerberos_mode') + if kinit_mode is None: + # HACK: ideally, remove multi-transport stuff + self._kerb_managed = "kerberos" in self._winrm_transport and (self._winrm_pass is not None and self._winrm_pass != "") + elif kinit_mode == "managed": + self._kerb_managed = True + elif kinit_mode == "manual": + self._kerb_managed = False + + # arg names we're going passing directly + internal_kwarg_mask = {'self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'} + + self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass) + argspec = getfullargspec(Protocol.__init__) + supported_winrm_args = set(argspec.args) + supported_winrm_args.update(internal_kwarg_mask) + passed_winrm_args = {v.replace('ansible_winrm_', '') for v in self.get_option('_extras')} + unsupported_args = passed_winrm_args.difference(supported_winrm_args) + + # warn for kwargs unsupported by the installed version of pywinrm + for arg in unsupported_args: + display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg)) + + # pass through matching extras, excluding the list we want to treat specially + for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args): + self._winrm_kwargs[arg] = self.get_option('_extras')['ansible_winrm_%s' % arg] + + # Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection + # auth itself with a private CCACHE. 
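+ # The method below writes the ticket to a throwaway FILE: ccache (a NamedTemporaryFile) and points KRB5CCNAME at it, so each connection authenticates against its own private cache rather than the user's default one.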
+ def _kerb_auth(self, principal, password): + if password is None: + password = "" + + self._kerb_ccache = tempfile.NamedTemporaryFile() + display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name) + krb5ccname = "FILE:%s" % self._kerb_ccache.name + os.environ["KRB5CCNAME"] = krb5ccname + krb5env = dict(PATH=os.environ["PATH"], KRB5CCNAME=krb5ccname) + + # Add any explicit environment vars into the krb5env block + kinit_env_vars = self.get_option('kinit_env_vars') + for var in kinit_env_vars: + if var not in krb5env and var in os.environ: + krb5env[var] = os.environ[var] + + # Stores various flags to call with kinit, these could be explicit args set by 'ansible_winrm_kinit_args' OR + # '-f' if kerberos delegation is requested (ansible_winrm_kerberos_delegation). + kinit_cmdline = [self._kinit_cmd] + kinit_args = self.get_option('kinit_args') + if kinit_args: + kinit_args = [to_text(a) for a in shlex.split(kinit_args) if a.strip()] + kinit_cmdline.extend(kinit_args) + + elif boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)): + kinit_cmdline.append('-f') + + kinit_cmdline.append(principal) + + # pexpect runs the process in its own pty so it can correctly send + # the password as input even on MacOS which blocks subprocess from + # doing so. Unfortunately it is not available on the built in Python + # so we can only use it if someone has installed it + if HAS_PEXPECT: + proc_mechanism = "pexpect" + command = kinit_cmdline.pop(0) + password = to_text(password, encoding='utf-8', + errors='surrogate_or_strict') + + display.vvvv("calling kinit with pexpect for principal %s" + % principal) + try: + child = pexpect.spawn(command, kinit_cmdline, timeout=60, + env=krb5env, echo=False) + except pexpect.ExceptionPexpect as err: + err_msg = "Kerberos auth failure when calling kinit cmd " \ + "'%s': %s" % (command, to_native(err)) + raise AnsibleConnectionFailure(err_msg) + + try: + child.expect(".*:") + child.sendline(password) + except OSError as err: + # child exited before the pass was sent, Ansible will raise + # error based on the rc below, just display the error here + display.vvvv("kinit with pexpect raised OSError: %s" + % to_native(err)) + + # technically this is the stdout + stderr but to match the + # subprocess error checking behaviour, we will call it stderr + stderr = child.read() + child.wait() + rc = child.exitstatus + else: + proc_mechanism = "subprocess" + password = to_bytes(password, encoding='utf-8', + errors='surrogate_or_strict') + + display.vvvv("calling kinit with subprocess for principal %s" + % principal) + try: + p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=krb5env) + + except OSError as err: + err_msg = "Kerberos auth failure when calling kinit cmd " \ + "'%s': %s" % (self._kinit_cmd, to_native(err)) + raise AnsibleConnectionFailure(err_msg) + + stdout, stderr = p.communicate(password + b'\n') + rc = p.returncode != 0 + + if rc != 0: + # one last attempt at making sure the password does not exist + # in the output + exp_msg = to_native(stderr.strip()) + exp_msg = exp_msg.replace(to_native(password), "<redacted>") + + err_msg = "Kerberos auth failure for principal %s with %s: %s" \ + % (principal, proc_mechanism, exp_msg) + raise AnsibleConnectionFailure(err_msg) + + display.vvvvv("kinit succeeded for principal %s" % principal) + + def _winrm_connect(self): + ''' + Establish a WinRM connection over HTTP/HTTPS. 
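+ Candidate transports are tried in order and the first one that connects is used; if none succeed, the per-transport errors are raised together.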
+ ''' + display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % + (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host) + + winrm_host = self._winrm_host + if HAS_IPADDRESS: + display.debug("checking if winrm_host %s is an IPv6 address" % winrm_host) + try: + ipaddress.IPv6Address(winrm_host) + except ipaddress.AddressValueError: + pass + else: + winrm_host = "[%s]" % winrm_host + + netloc = '%s:%d' % (winrm_host, self._winrm_port) + endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', '')) + errors = [] + for transport in self._winrm_transport: + if transport == 'kerberos': + if not HAVE_KERBEROS: + errors.append('kerberos: the python kerberos library is not installed') + continue + if self._kerb_managed: + self._kerb_auth(self._winrm_user, self._winrm_pass) + display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host) + try: + winrm_kwargs = self._winrm_kwargs.copy() + if self._winrm_connection_timeout: + winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout + winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1 + protocol = Protocol(endpoint, transport=transport, **winrm_kwargs) + + # open the shell from connect so we know we're able to talk to the server + if not self.shell_id: + self.shell_id = protocol.open_shell(codepage=65001) # UTF-8 + display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host) + + return protocol + except Exception as e: + err_msg = to_text(e).strip() + if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I): + raise AnsibleError('the connection attempt timed out') + m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg) + if m: + code = int(m.groups()[0]) + if code == 401: + err_msg = 'the specified credentials were rejected by the server' + elif code == 411: + return protocol + errors.append(u'%s: %s' % (transport, err_msg)) + display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host) + if errors: + raise AnsibleConnectionFailure(', '.join(map(to_native, errors))) + else: + raise AnsibleError('No transport found for WinRM connection') + + def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False): + rq = {'env:Envelope': protocol._get_soap_header( + resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd', + action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send', + shell_id=shell_id)} + stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\ + .setdefault('rsp:Stream', {}) + stream['@Name'] = 'stdin' + stream['@CommandId'] = command_id + stream['#text'] = base64.b64encode(to_bytes(stdin)) + if eof: + stream['@End'] = 'true' + protocol.send_message(xmltodict.unparse(rq)) + + def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None): + if not self.protocol: + self.protocol = self._winrm_connect() + self._connected = True + if from_exec: + display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host) + else: + display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host) + command_id = None + try: + stdin_push_failed = False + command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None)) + + try: + if stdin_iterator: + for (data, is_last) in stdin_iterator: + self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last) + + 
except Exception as ex: + display.warning("ERROR DURING WINRM SEND INPUT - attempting to recover: %s %s" + % (type(ex).__name__, to_text(ex))) + display.debug(traceback.format_exc()) + stdin_push_failed = True + + # NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy). + # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure. + resptuple = self.protocol.get_command_output(self.shell_id, command_id) + # ensure stdout/stderr are text for py3 + # FUTURE: this should probably be done internally by pywinrm + response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple)) + + # TODO: check result from response and set stdin_push_failed if we have nonzero + if from_exec: + display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host) + else: + display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host) + + display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host) + display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host) + + if stdin_push_failed: + # There are cases where the stdin input failed but the WinRM service still processed it. We attempt to + # see if stdout contains a valid json return value so we can ignore this error + try: + filtered_output, dummy = _filter_non_json_lines(response.std_out) + json.loads(filtered_output) + except ValueError: + # stdout does not contain a return response, stdin input was a fatal error + stderr = to_bytes(response.std_err, encoding='utf-8') + if stderr.startswith(b"#< CLIXML"): + stderr = _parse_clixml(stderr) + + raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' + % (to_native(response.std_out), to_native(stderr))) + + return response + except requests.exceptions.Timeout as exc: + raise AnsibleConnectionFailure('winrm connection error: %s' % to_native(exc)) + finally: + if command_id: + self.protocol.cleanup_command(self.shell_id, command_id) + + def _connect(self): + + if not HAS_WINRM: + raise AnsibleError("winrm or requests is not installed: %s" % to_native(WINRM_IMPORT_ERR)) + elif not HAS_XMLTODICT: + raise AnsibleError("xmltodict is not installed: %s" % to_native(XMLTODICT_IMPORT_ERR)) + + super(Connection, self)._connect() + if not self.protocol: + self._build_winrm_kwargs() # build the kwargs from the options set + self.protocol = self._winrm_connect() + self._connected = True + return self + + def reset(self): + if not self._connected: + return + self.protocol = None + self.shell_id = None + self._connect() + + def _wrapper_payload_stream(self, payload, buffer_size=200000): + payload_bytes = to_bytes(payload) + byte_count = len(payload_bytes) + for i in range(0, byte_count, buffer_size): + yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count + + def exec_command(self, cmd, in_data=None, sudoable=True): + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False) + + # TODO: display something meaningful here + display.vvv("EXEC (via pipeline wrapper)") + + stdin_iterator = None + + if in_data: + stdin_iterator = self._wrapper_payload_stream(in_data) + + result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator) + + result.std_out = to_bytes(result.std_out) + result.std_err = to_bytes(result.std_err) + + # parse 
just stderr from CLIXML output + if result.std_err.startswith(b"#< CLIXML"): + try: + result.std_err = _parse_clixml(result.std_err) + except Exception: + # unsure if we're guaranteed a valid xml doc- use raw output in case of error + pass + + return (result.status_code, result.std_out, result.std_err) + + # FUTURE: determine buffer size at runtime via remote winrm config? + def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000): + in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict')) + offset = 0 + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + for out_data in iter((lambda: in_file.read(buffer_size)), b''): + offset += len(out_data) + self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host) + # yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded + b64_data = base64.b64encode(out_data) + b'\r\n' + # cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal + yield b64_data, (in_file.tell() == in_size) + + if offset == 0: # empty file, return an empty buffer + eof to close it + yield "", True + + def put_file(self, in_path, out_path): + super(Connection, self).put_file(in_path, out_path) + out_path = self._shell._unquote(out_path) + display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host) + if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path)) + + script_template = u''' + begin {{ + $path = '{0}' + + $DebugPreference = "Continue" + $ErrorActionPreference = "Stop" + Set-StrictMode -Version 2 + + $fd = [System.IO.File]::Create($path) + + $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create() + + $bytes = @() #initialize for empty file case + }} + process {{ + $bytes = [System.Convert]::FromBase64String($input) + $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null + $fd.Write($bytes, 0, $bytes.Length) + }} + end {{ + $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null + + $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant() + + $fd.Close() + + Write-Output "{{""sha1"":""$hash""}}" + }} + ''' + + script = script_template.format(self._shell._escape(out_path)) + cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False) + + result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path)) + # TODO: improve error handling + if result.status_code != 0: + raise AnsibleError(to_native(result.std_err)) + + try: + put_output = json.loads(result.std_out) + except ValueError: + # stdout does not contain a valid response + stderr = to_bytes(result.std_err, encoding='utf-8') + if stderr.startswith(b"#< CLIXML"): + stderr = _parse_clixml(stderr) + raise AnsibleError('winrm put_file failed; \nstdout: %s\nstderr %s' % (to_native(result.std_out), to_native(stderr))) + + remote_sha1 = put_output.get("sha1") + if not remote_sha1: + raise AnsibleError("Remote sha1 was not returned") + + local_sha1 = secure_hash(in_path) + + if not remote_sha1 == local_sha1: + raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1))) + + def fetch_file(self, in_path, out_path): + 
super(Connection, self).fetch_file(in_path, out_path) + in_path = self._shell._unquote(in_path) + out_path = out_path.replace('\\', '/') + # consistent with other connection plugins, we assume the caller has created the target dir + display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host) + buffer_size = 2**19 # 0.5MB chunks + out_file = None + try: + offset = 0 + while True: + try: + script = ''' + $path = '%(path)s' + If (Test-Path -Path $path -PathType Leaf) + { + $buffer_size = %(buffer_size)d + $offset = %(offset)d + + $stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite) + $stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null + $buffer = New-Object -TypeName byte[] $buffer_size + $bytes_read = $stream.Read($buffer, 0, $buffer_size) + if ($bytes_read -gt 0) { + $bytes = $buffer[0..($bytes_read - 1)] + [System.Convert]::ToBase64String($bytes) + } + $stream.Close() > $null + } + ElseIf (Test-Path -Path $path -PathType Container) + { + Write-Host "[DIR]"; + } + Else + { + Write-Error "$path does not exist"; + Exit 1; + } + ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset) + display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host) + cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False) + result = self._winrm_exec(cmd_parts[0], cmd_parts[1:]) + if result.status_code != 0: + raise IOError(to_native(result.std_err)) + if result.std_out.strip() == '[DIR]': + data = None + else: + data = base64.b64decode(result.std_out.strip()) + if data is None: + break + else: + if not out_file: + # If out_path is a directory and we're expecting a file, bail out now. + if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')): + break + out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') + out_file.write(data) + if len(data) < buffer_size: + break + offset += len(data) + except Exception: + traceback.print_exc() + raise AnsibleError('failed to transfer file to "%s"' % to_native(out_path)) + finally: + if out_file: + out_file.close() + + def close(self): + if self.protocol and self.shell_id: + display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host) + self.protocol.close_shell(self.shell_id) + self.shell_id = None + self.protocol = None + self._connected = False |