author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-28 16:04:21 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-28 16:04:21 +0000
commit     8a754e0858d922e955e71b253c139e071ecec432 (patch)
tree       527d16e74bfd1840c85efd675fdecad056c54107 /test/support/integration
parent     Initial commit. (diff)
Adding upstream version 2.14.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'test/support/integration')
12 files changed, 5612 insertions, 0 deletions
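For orientation before the raw diff: the json_query filter added below is a
thin wrapper around jmespath.search(), so its semantics can be previewed with
the jmespath package directly. A minimal sketch (the sample data is invented
for illustration):

    import jmespath

    data = {"instances": [{"id": "i-1", "tags": {"env": "prod"}},
                          {"id": "i-2", "tags": {"env": "dev"}}]}
    # The same JMESPath expression the filter would receive in a template.
    print(jmespath.search("instances[?tags.env=='prod'].id", data))  # ['i-1']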
diff --git a/test/support/integration/plugins/filter/json_query.py b/test/support/integration/plugins/filter/json_query.py
new file mode 100644
index 0000000..d1da71b
--- /dev/null
+++ b/test/support/integration/plugins/filter/json_query.py
@@ -0,0 +1,53 @@
+# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+
+try:
+    import jmespath
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+
+def json_query(data, expr):
+    '''Query data using jmespath query language ( http://jmespath.org ). Example:
+    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
+    '''
+    if not HAS_LIB:
+        raise AnsibleError('You need to install "jmespath" prior to running '
+                           'json_query filter')
+
+    try:
+        return jmespath.search(expr, data)
+    except jmespath.exceptions.JMESPathError as e:
+        raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+    except Exception as e:
+        # For older jmespath, we can get ValueError and TypeError without much info.
+        raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+
+
+class FilterModule(object):
+    ''' Query filter '''
+
+    def filters(self):
+        return {
+            'json_query': json_query
+        }
diff --git a/test/support/integration/plugins/module_utils/compat/__init__.py b/test/support/integration/plugins/module_utils/compat/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/compat/__init__.py
diff --git a/test/support/integration/plugins/module_utils/compat/ipaddress.py b/test/support/integration/plugins/module_utils/compat/ipaddress.py
new file mode 100644
index 0000000..c46ad72
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/compat/ipaddress.py
@@ -0,0 +1,2476 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file, and this file only, is based on
+# Lib/ipaddress.py of cpython
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
+# are retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+# Copyright 2007 Google Inc.
+# Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
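Illustration (not part of the diff): a quick taste of the public API defined
in the rest of this file, doctest style, with Python 3 reprs shown (Python 2
reprs carry a u'' prefix):

    >>> ip_network(u'192.0.2.0/24').num_addresses
    256
    >>> ip_address(u'192.0.2.1') in ip_network(u'192.0.2.0/24')
    True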
+ +""" + +from __future__ import unicode_literals + + +import itertools +import struct + + +# The following makes it easier for us to script updates of the bundled code and is not part of +# upstream +_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"} + +__version__ = '1.0.22' + +# Compatibility functions +_compat_int_types = (int,) +try: + _compat_int_types = (int, long) +except NameError: + pass +try: + _compat_str = unicode +except NameError: + _compat_str = str + assert bytes != str +if b'\0'[0] == 0: # Python 3 semantics + def _compat_bytes_to_byte_vals(byt): + return byt +else: + def _compat_bytes_to_byte_vals(byt): + return [struct.unpack(b'!B', b)[0] for b in byt] +try: + _compat_int_from_byte_vals = int.from_bytes +except AttributeError: + def _compat_int_from_byte_vals(bytvals, endianess): + assert endianess == 'big' + res = 0 + for bv in bytvals: + assert isinstance(bv, _compat_int_types) + res = (res << 8) + bv + return res + + +def _compat_to_bytes(intval, length, endianess): + assert isinstance(intval, _compat_int_types) + assert endianess == 'big' + if length == 4: + if intval < 0 or intval >= 2 ** 32: + raise struct.error("integer out of range for 'I' format code") + return struct.pack(b'!I', intval) + elif length == 16: + if intval < 0 or intval >= 2 ** 128: + raise struct.error("integer out of range for 'QQ' format code") + return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff) + else: + raise NotImplementedError() + + +if hasattr(int, 'bit_length'): + # Not int.bit_length , since that won't work in 2.7 where long exists + def _compat_bit_length(i): + return i.bit_length() +else: + def _compat_bit_length(i): + for res in itertools.count(): + if i >> res == 0: + return res + + +def _compat_range(start, end, step=1): + assert step > 0 + i = start + while i < end: + yield i + i += step + + +class _TotalOrderingMixin(object): + __slots__ = () + + # Helper that derives the other comparison operations from + # __lt__ and __eq__ + # We avoid functools.total_ordering because it doesn't handle + # NotImplemented correctly yet (http://bugs.python.org/issue10042) + def __eq__(self, other): + raise NotImplementedError + + def __ne__(self, other): + equal = self.__eq__(other) + if equal is NotImplemented: + return NotImplemented + return not equal + + def __lt__(self, other): + raise NotImplementedError + + def __le__(self, other): + less = self.__lt__(other) + if less is NotImplemented or not less: + return self.__eq__(other) + return less + + def __gt__(self, other): + less = self.__lt__(other) + if less is NotImplemented: + return NotImplemented + equal = self.__eq__(other) + if equal is NotImplemented: + return NotImplemented + return not (less or equal) + + def __ge__(self, other): + less = self.__lt__(other) + if less is NotImplemented: + return NotImplemented + return not less + + +IPV4LENGTH = 32 +IPV6LENGTH = 128 + + +class AddressValueError(ValueError): + """A Value Error related to the address.""" + + +class NetmaskValueError(ValueError): + """A Value Error related to the netmask.""" + + +def ip_address(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Address or IPv6Address object. 
+ + Raises: + ValueError: if the *address* passed isn't either a v4 or a v6 + address + + """ + try: + return IPv4Address(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Address(address) + except (AddressValueError, NetmaskValueError): + pass + + if isinstance(address, bytes): + raise AddressValueError( + '%r does not appear to be an IPv4 or IPv6 address. ' + 'Did you pass in a bytes (str in Python 2) instead of' + ' a unicode object?' % address) + + raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % + address) + + +def ip_network(address, strict=True): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP network. Either IPv4 or + IPv6 networks may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Network or IPv6Network object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. Or if the network has host bits set. + + """ + try: + return IPv4Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + if isinstance(address, bytes): + raise AddressValueError( + '%r does not appear to be an IPv4 or IPv6 network. ' + 'Did you pass in a bytes (str in Python 2) instead of' + ' a unicode object?' % address) + + raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % + address) + + +def ip_interface(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Interface or IPv6Interface object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. + + Notes: + The IPv?Interface classes describe an Address on a particular + Network, so they're basically a combination of both the Address + and Network classes. + + """ + try: + return IPv4Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' % + address) + + +def v4_int_to_packed(address): + """Represent an address as 4 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv4 IP address. + + Returns: + The integer address packed as 4 bytes in network (big-endian) order. + + Raises: + ValueError: If the integer is negative or too large to be an + IPv4 IP address. + + """ + try: + return _compat_to_bytes(address, 4, 'big') + except (struct.error, OverflowError): + raise ValueError("Address negative or too large for IPv4") + + +def v6_int_to_packed(address): + """Represent an address as 16 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv6 IP address. + + Returns: + The integer address packed as 16 bytes in network (big-endian) order. 
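Illustration (not part of the diff): both packing helpers are big-endian; a
small check against the IPv4 variant defined above, with byte values that can
be verified by hand:

    >>> v4_int_to_packed(int(IPv4Address(u'192.0.2.1')))
    b'\xc0\x00\x02\x01'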
+ + """ + try: + return _compat_to_bytes(address, 16, 'big') + except (struct.error, OverflowError): + raise ValueError("Address negative or too large for IPv6") + + +def _split_optional_netmask(address): + """Helper to split the netmask and raise AddressValueError if needed""" + addr = _compat_str(address).split('/') + if len(addr) > 2: + raise AddressValueError("Only one '/' permitted in %r" % address) + return addr + + +def _find_address_range(addresses): + """Find a sequence of sorted deduplicated IPv#Address. + + Args: + addresses: a list of IPv#Address objects. + + Yields: + A tuple containing the first and last IP addresses in the sequence. + + """ + it = iter(addresses) + first = last = next(it) # pylint: disable=stop-iteration-return + for ip in it: + if ip._ip != last._ip + 1: + yield first, last + first = ip + last = ip + yield first, last + + +def _count_righthand_zero_bits(number, bits): + """Count the number of zero bits on the right hand side. + + Args: + number: an integer. + bits: maximum number of bits to count. + + Returns: + The number of zero bits on the right hand side of the number. + + """ + if number == 0: + return bits + return min(bits, _compat_bit_length(~number & (number - 1))) + + +def summarize_address_range(first, last): + """Summarize a network range given the first and last IP addresses. + + Example: + >>> list(summarize_address_range(IPv4Address('192.0.2.0'), + ... IPv4Address('192.0.2.130'))) + ... #doctest: +NORMALIZE_WHITESPACE + [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), + IPv4Network('192.0.2.130/32')] + + Args: + first: the first IPv4Address or IPv6Address in the range. + last: the last IPv4Address or IPv6Address in the range. + + Returns: + An iterator of the summarized IPv(4|6) network objects. + + Raise: + TypeError: + If the first and last objects are not IP addresses. + If the first and last objects are not the same version. + ValueError: + If the last object is not greater than the first. + If the version of the first address is not 4 or 6. + + """ + if (not (isinstance(first, _BaseAddress) and + isinstance(last, _BaseAddress))): + raise TypeError('first and last must be IP addresses, not networks') + if first.version != last.version: + raise TypeError("%s and %s are not of the same version" % ( + first, last)) + if first > last: + raise ValueError('last IP address must be greater than first') + + if first.version == 4: + ip = IPv4Network + elif first.version == 6: + ip = IPv6Network + else: + raise ValueError('unknown IP version') + + ip_bits = first._max_prefixlen + first_int = first._ip + last_int = last._ip + while first_int <= last_int: + nbits = min(_count_righthand_zero_bits(first_int, ip_bits), + _compat_bit_length(last_int - first_int + 1) - 1) + net = ip((first_int, ip_bits - nbits)) + yield net + first_int += 1 << nbits + if first_int - 1 == ip._ALL_ONES: + break + + +def _collapse_addresses_internal(addresses): + """Loops through the addresses, collapsing concurrent netblocks. + + Example: + + ip1 = IPv4Network('192.0.2.0/26') + ip2 = IPv4Network('192.0.2.64/26') + ip3 = IPv4Network('192.0.2.128/26') + ip4 = IPv4Network('192.0.2.192/26') + + _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> + [IPv4Network('192.0.2.0/24')] + + This shouldn't be called directly; it is called via + collapse_addresses([]). + + Args: + addresses: A list of IPv4Network's or IPv6Network's + + Returns: + A list of IPv4Network's or IPv6Network's depending on what we were + passed. 
+ + """ + # First merge + to_merge = list(addresses) + subnets = {} + while to_merge: + net = to_merge.pop() + supernet = net.supernet() + existing = subnets.get(supernet) + if existing is None: + subnets[supernet] = net + elif existing != net: + # Merge consecutive subnets + del subnets[supernet] + to_merge.append(supernet) + # Then iterate over resulting networks, skipping subsumed subnets + last = None + for net in sorted(subnets.values()): + if last is not None: + # Since they are sorted, + # last.network_address <= net.network_address is a given. + if last.broadcast_address >= net.broadcast_address: + continue + yield net + last = net + + +def collapse_addresses(addresses): + """Collapse a list of IP objects. + + Example: + collapse_addresses([IPv4Network('192.0.2.0/25'), + IPv4Network('192.0.2.128/25')]) -> + [IPv4Network('192.0.2.0/24')] + + Args: + addresses: An iterator of IPv4Network or IPv6Network objects. + + Returns: + An iterator of the collapsed IPv(4|6)Network objects. + + Raises: + TypeError: If passed a list of mixed version objects. + + """ + addrs = [] + ips = [] + nets = [] + + # split IP addresses and networks + for ip in addresses: + if isinstance(ip, _BaseAddress): + if ips and ips[-1]._version != ip._version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + ips.append(ip) + elif ip._prefixlen == ip._max_prefixlen: + if ips and ips[-1]._version != ip._version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + try: + ips.append(ip.ip) + except AttributeError: + ips.append(ip.network_address) + else: + if nets and nets[-1]._version != ip._version: + raise TypeError("%s and %s are not of the same version" % ( + ip, nets[-1])) + nets.append(ip) + + # sort and dedup + ips = sorted(set(ips)) + + # find consecutive address ranges in the sorted sequence and summarize them + if ips: + for first, last in _find_address_range(ips): + addrs.extend(summarize_address_range(first, last)) + + return _collapse_addresses_internal(addrs + nets) + + +def get_mixed_type_key(obj): + """Return a key suitable for sorting between networks and addresses. + + Address and Network objects are not sortable by default; they're + fundamentally different so the expression + + IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') + + doesn't make any sense. There are some times however, where you may wish + to have ipaddress sort these for you anyway. If you need to do this, you + can use this function as the key= argument to sorted(). + + Args: + obj: either a Network or Address object. + Returns: + appropriate key. 
+ + """ + if isinstance(obj, _BaseNetwork): + return obj._get_networks_key() + elif isinstance(obj, _BaseAddress): + return obj._get_address_key() + return NotImplemented + + +class _IPAddressBase(_TotalOrderingMixin): + + """The mother class.""" + + __slots__ = () + + @property + def exploded(self): + """Return the longhand version of the IP address as a string.""" + return self._explode_shorthand_ip_string() + + @property + def compressed(self): + """Return the shorthand version of the IP address as a string.""" + return _compat_str(self) + + @property + def reverse_pointer(self): + """The name of the reverse DNS pointer for the IP address, e.g.: + >>> ipaddress.ip_address("127.0.0.1").reverse_pointer + '1.0.0.127.in-addr.arpa' + >>> ipaddress.ip_address("2001:db8::1").reverse_pointer + '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' + + """ + return self._reverse_pointer() + + @property + def version(self): + msg = '%200s has no version specified' % (type(self),) + raise NotImplementedError(msg) + + def _check_int_address(self, address): + if address < 0: + msg = "%d (< 0) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self._version)) + if address > self._ALL_ONES: + msg = "%d (>= 2**%d) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self._max_prefixlen, + self._version)) + + def _check_packed_address(self, address, expected_len): + address_len = len(address) + if address_len != expected_len: + msg = ( + '%r (len %d != %d) is not permitted as an IPv%d address. ' + 'Did you pass in a bytes (str in Python 2) instead of' + ' a unicode object?') + raise AddressValueError(msg % (address, address_len, + expected_len, self._version)) + + @classmethod + def _ip_int_from_prefix(cls, prefixlen): + """Turn the prefix length into a bitwise netmask + + Args: + prefixlen: An integer, the prefix length. + + Returns: + An integer. + + """ + return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) + + @classmethod + def _prefix_from_ip_int(cls, ip_int): + """Return prefix length from the bitwise netmask. + + Args: + ip_int: An integer, the netmask in expanded bitwise format + + Returns: + An integer, the prefix length. + + Raises: + ValueError: If the input intermingles zeroes & ones + """ + trailing_zeroes = _count_righthand_zero_bits(ip_int, + cls._max_prefixlen) + prefixlen = cls._max_prefixlen - trailing_zeroes + leading_ones = ip_int >> trailing_zeroes + all_ones = (1 << prefixlen) - 1 + if leading_ones != all_ones: + byteslen = cls._max_prefixlen // 8 + details = _compat_to_bytes(ip_int, byteslen, 'big') + msg = 'Netmask pattern %r mixes zeroes & ones' + raise ValueError(msg % details) + return prefixlen + + @classmethod + def _report_invalid_netmask(cls, netmask_str): + msg = '%r is not a valid netmask' % netmask_str + raise NetmaskValueError(msg) + + @classmethod + def _prefix_from_prefix_string(cls, prefixlen_str): + """Return prefix length from a numeric string + + Args: + prefixlen_str: The string to be converted + + Returns: + An integer, the prefix length. 
+ + Raises: + NetmaskValueError: If the input is not a valid netmask + """ + # int allows a leading +/- as well as surrounding whitespace, + # so we ensure that isn't the case + if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): + cls._report_invalid_netmask(prefixlen_str) + try: + prefixlen = int(prefixlen_str) + except ValueError: + cls._report_invalid_netmask(prefixlen_str) + if not (0 <= prefixlen <= cls._max_prefixlen): + cls._report_invalid_netmask(prefixlen_str) + return prefixlen + + @classmethod + def _prefix_from_ip_string(cls, ip_str): + """Turn a netmask/hostmask string into a prefix length + + Args: + ip_str: The netmask/hostmask to be converted + + Returns: + An integer, the prefix length. + + Raises: + NetmaskValueError: If the input is not a valid netmask/hostmask + """ + # Parse the netmask/hostmask like an IP address. + try: + ip_int = cls._ip_int_from_string(ip_str) + except AddressValueError: + cls._report_invalid_netmask(ip_str) + + # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). + # Note that the two ambiguous cases (all-ones and all-zeroes) are + # treated as netmasks. + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + pass + + # Invert the bits, and try matching a /0+1+/ hostmask instead. + ip_int ^= cls._ALL_ONES + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + cls._report_invalid_netmask(ip_str) + + def __reduce__(self): + return self.__class__, (_compat_str(self),) + + +class _BaseAddress(_IPAddressBase): + + """A generic IP object. + + This IP class contains the version independent methods which are + used by single IP addresses. + """ + + __slots__ = () + + def __int__(self): + return self._ip + + def __eq__(self, other): + try: + return (self._ip == other._ip and + self._version == other._version) + except AttributeError: + return NotImplemented + + def __lt__(self, other): + if not isinstance(other, _IPAddressBase): + return NotImplemented + if not isinstance(other, _BaseAddress): + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + if self._version != other._version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self._ip != other._ip: + return self._ip < other._ip + return False + + # Shorthand for Integer addition and subtraction. This is not + # meant to ever support addition/subtraction of addresses. + def __add__(self, other): + if not isinstance(other, _compat_int_types): + return NotImplemented + return self.__class__(int(self) + other) + + def __sub__(self, other): + if not isinstance(other, _compat_int_types): + return NotImplemented + return self.__class__(int(self) - other) + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) + + def __str__(self): + return _compat_str(self._string_from_ip_int(self._ip)) + + def __hash__(self): + return hash(hex(int(self._ip))) + + def _get_address_key(self): + return (self._version, self) + + def __reduce__(self): + return self.__class__, (self._ip,) + + +class _BaseNetwork(_IPAddressBase): + + """A generic IP network object. + + This IP class contains the version independent methods which are + used by networks. + + """ + def __init__(self, address): + self._cache = {} + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) + + def __str__(self): + return '%s/%d' % (self.network_address, self.prefixlen) + + def hosts(self): + """Generate Iterator over usable hosts in a network. 
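Illustration (not part of the diff): the network and broadcast addresses are
excluded, so a /29 yields its six usable hosts:

    >>> list(ip_network(u'192.0.2.0/29').hosts())
    [IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2'),
     IPv4Address('192.0.2.3'), IPv4Address('192.0.2.4'),
     IPv4Address('192.0.2.5'), IPv4Address('192.0.2.6')]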
+ + This is like __iter__ except it doesn't return the network + or broadcast addresses. + + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network + 1, broadcast): + yield self._address_class(x) + + def __iter__(self): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network, broadcast + 1): + yield self._address_class(x) + + def __getitem__(self, n): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + if n >= 0: + if network + n > broadcast: + raise IndexError('address out of range') + return self._address_class(network + n) + else: + n += 1 + if broadcast + n < network: + raise IndexError('address out of range') + return self._address_class(broadcast + n) + + def __lt__(self, other): + if not isinstance(other, _IPAddressBase): + return NotImplemented + if not isinstance(other, _BaseNetwork): + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + if self._version != other._version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self.network_address != other.network_address: + return self.network_address < other.network_address + if self.netmask != other.netmask: + return self.netmask < other.netmask + return False + + def __eq__(self, other): + try: + return (self._version == other._version and + self.network_address == other.network_address and + int(self.netmask) == int(other.netmask)) + except AttributeError: + return NotImplemented + + def __hash__(self): + return hash(int(self.network_address) ^ int(self.netmask)) + + def __contains__(self, other): + # always false if one is v4 and the other is v6. + if self._version != other._version: + return False + # dealing with another network. + if isinstance(other, _BaseNetwork): + return False + # dealing with another address + else: + # address + return (int(self.network_address) <= int(other._ip) <= + int(self.broadcast_address)) + + def overlaps(self, other): + """Tell if self is partly contained in other.""" + return self.network_address in other or ( + self.broadcast_address in other or ( + other.network_address in self or ( + other.broadcast_address in self))) + + @property + def broadcast_address(self): + x = self._cache.get('broadcast_address') + if x is None: + x = self._address_class(int(self.network_address) | + int(self.hostmask)) + self._cache['broadcast_address'] = x + return x + + @property + def hostmask(self): + x = self._cache.get('hostmask') + if x is None: + x = self._address_class(int(self.netmask) ^ self._ALL_ONES) + self._cache['hostmask'] = x + return x + + @property + def with_prefixlen(self): + return '%s/%d' % (self.network_address, self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self.network_address, self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self.network_address, self.hostmask) + + @property + def num_addresses(self): + """Number of hosts in the current subnet.""" + return int(self.broadcast_address) - int(self.network_address) + 1 + + @property + def _address_class(self): + # Returning bare address objects (rather than interfaces) allows for + # more consistent behaviour across the network address, broadcast + # address and individual host addresses. 
+ msg = '%200s has no associated address class' % (type(self),) + raise NotImplementedError(msg) + + @property + def prefixlen(self): + return self._prefixlen + + def address_exclude(self, other): + """Remove an address from a larger block. + + For example: + + addr1 = ip_network('192.0.2.0/28') + addr2 = ip_network('192.0.2.1/32') + list(addr1.address_exclude(addr2)) = + [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), + IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] + + or IPv6: + + addr1 = ip_network('2001:db8::1/32') + addr2 = ip_network('2001:db8::1/128') + list(addr1.address_exclude(addr2)) = + [ip_network('2001:db8::1/128'), + ip_network('2001:db8::2/127'), + ip_network('2001:db8::4/126'), + ip_network('2001:db8::8/125'), + ... + ip_network('2001:db8:8000::/33')] + + Args: + other: An IPv4Network or IPv6Network object of the same type. + + Returns: + An iterator of the IPv(4|6)Network objects which is self + minus other. + + Raises: + TypeError: If self and other are of differing address + versions, or if other is not a network object. + ValueError: If other is not completely contained by self. + + """ + if not self._version == other._version: + raise TypeError("%s and %s are not of the same version" % ( + self, other)) + + if not isinstance(other, _BaseNetwork): + raise TypeError("%s is not a network object" % other) + + if not other.subnet_of(self): + raise ValueError('%s not contained in %s' % (other, self)) + if other == self: + return + + # Make sure we're comparing the network of other. + other = other.__class__('%s/%s' % (other.network_address, + other.prefixlen)) + + s1, s2 = self.subnets() + while s1 != other and s2 != other: + if other.subnet_of(s1): + yield s2 + s1, s2 = s1.subnets() + elif other.subnet_of(s2): + yield s1 + s1, s2 = s2.subnets() + else: + # If we got here, there's a bug somewhere. + raise AssertionError('Error performing exclusion: ' + 's1: %s s2: %s other: %s' % + (s1, s2, other)) + if s1 == other: + yield s2 + elif s2 == other: + yield s1 + else: + # If we got here, there's a bug somewhere. + raise AssertionError('Error performing exclusion: ' + 's1: %s s2: %s other: %s' % + (s1, s2, other)) + + def compare_networks(self, other): + """Compare two IP objects. + + This is only concerned about the comparison of the integer + representation of the network addresses. This means that the + host bits aren't considered at all in this method. If you want + to compare host bits, you can easily enough do a + 'HostA._ip < HostB._ip' + + Args: + other: An IP object. + + Returns: + If the IP versions of self and other are the same, returns: + + -1 if self < other: + eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') + IPv6Network('2001:db8::1000/124') < + IPv6Network('2001:db8::2000/124') + 0 if self == other + eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') + IPv6Network('2001:db8::1000/124') == + IPv6Network('2001:db8::1000/124') + 1 if self > other + eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') + IPv6Network('2001:db8::2000/124') > + IPv6Network('2001:db8::1000/124') + + Raises: + TypeError if the IP versions are different. + + """ + # does this need to raise a ValueError? 
+ if self._version != other._version: + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + # self._version == other._version below here: + if self.network_address < other.network_address: + return -1 + if self.network_address > other.network_address: + return 1 + # self.network_address == other.network_address below here: + if self.netmask < other.netmask: + return -1 + if self.netmask > other.netmask: + return 1 + return 0 + + def _get_networks_key(self): + """Network-only key function. + + Returns an object that identifies this address' network and + netmask. This function is a suitable "key" argument for sorted() + and list.sort(). + + """ + return (self._version, self.network_address, self.netmask) + + def subnets(self, prefixlen_diff=1, new_prefix=None): + """The subnets which join to make the current subnet. + + In the case that self contains only one IP + (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 + for IPv6), yield an iterator with just ourself. + + Args: + prefixlen_diff: An integer, the amount the prefix length + should be increased by. This should not be set if + new_prefix is also set. + new_prefix: The desired new prefix length. This must be a + larger number (smaller prefix) than the existing prefix. + This should not be set if prefixlen_diff is also set. + + Returns: + An iterator of IPv(4|6) objects. + + Raises: + ValueError: The prefixlen_diff is too small or too large. + OR + prefixlen_diff and new_prefix are both set or new_prefix + is a smaller number than the current prefix (smaller + number means a larger network) + + """ + if self._prefixlen == self._max_prefixlen: + yield self + return + + if new_prefix is not None: + if new_prefix < self._prefixlen: + raise ValueError('new prefix must be longer') + if prefixlen_diff != 1: + raise ValueError('cannot set prefixlen_diff and new_prefix') + prefixlen_diff = new_prefix - self._prefixlen + + if prefixlen_diff < 0: + raise ValueError('prefix length diff must be > 0') + new_prefixlen = self._prefixlen + prefixlen_diff + + if new_prefixlen > self._max_prefixlen: + raise ValueError( + 'prefix length diff %d is invalid for netblock %s' % ( + new_prefixlen, self)) + + start = int(self.network_address) + end = int(self.broadcast_address) + 1 + step = (int(self.hostmask) + 1) >> prefixlen_diff + for new_addr in _compat_range(start, end, step): + current = self.__class__((new_addr, new_prefixlen)) + yield current + + def supernet(self, prefixlen_diff=1, new_prefix=None): + """The supernet containing the current network. + + Args: + prefixlen_diff: An integer, the amount the prefix length of + the network should be decreased by. For example, given a + /24 network and a prefixlen_diff of 3, a supernet with a + /21 netmask is returned. + + Returns: + An IPv4 network object. + + Raises: + ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have + a negative prefix length. 
+ OR + If prefixlen_diff and new_prefix are both set or new_prefix is a + larger number than the current prefix (larger number means a + smaller network) + + """ + if self._prefixlen == 0: + return self + + if new_prefix is not None: + if new_prefix > self._prefixlen: + raise ValueError('new prefix must be shorter') + if prefixlen_diff != 1: + raise ValueError('cannot set prefixlen_diff and new_prefix') + prefixlen_diff = self._prefixlen - new_prefix + + new_prefixlen = self.prefixlen - prefixlen_diff + if new_prefixlen < 0: + raise ValueError( + 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % + (self.prefixlen, prefixlen_diff)) + return self.__class__(( + int(self.network_address) & (int(self.netmask) << prefixlen_diff), + new_prefixlen)) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. + + """ + return (self.network_address.is_multicast and + self.broadcast_address.is_multicast) + + @staticmethod + def _is_subnet_of(a, b): + try: + # Always false if one is v4 and the other is v6. + if a._version != b._version: + raise TypeError("%s and %s are not of the same version" % (a, b)) + return (b.network_address <= a.network_address and + b.broadcast_address >= a.broadcast_address) + except AttributeError: + raise TypeError("Unable to test subnet containment " + "between %s and %s" % (a, b)) + + def subnet_of(self, other): + """Return True if this network is a subnet of other.""" + return self._is_subnet_of(self, other) + + def supernet_of(self, other): + """Return True if this network is a supernet of other.""" + return self._is_subnet_of(other, self) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + return (self.network_address.is_reserved and + self.broadcast_address.is_reserved) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + return (self.network_address.is_link_local and + self.broadcast_address.is_link_local) + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return (self.network_address.is_private and + self.broadcast_address.is_private) + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, True if the address is not reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + return (self.network_address.is_unspecified and + self.broadcast_address.is_unspecified) + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + return (self.network_address.is_loopback and + self.broadcast_address.is_loopback) + + +class _BaseV4(object): + + """Base IPv4 object. 
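Illustration (not part of the diff): a quick check of the subnet relations
defined above before the IPv4 specifics begin:

    >>> net = ip_network(u'192.0.2.0/24')
    >>> list(net.subnets(new_prefix=26))[0].subnet_of(net)
    True
    >>> net.supernet(prefixlen_diff=2)
    IPv4Network('192.0.0.0/22')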
+ + The following methods are used by IPv4 objects in both single IP + addresses and networks. + + """ + + __slots__ = () + _version = 4 + # Equivalent to 255.255.255.255 or 32 bits of 1's. + _ALL_ONES = (2 ** IPV4LENGTH) - 1 + _DECIMAL_DIGITS = frozenset('0123456789') + + # the valid octets for host and netmasks. only useful for IPv4. + _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) + + _max_prefixlen = IPV4LENGTH + # There are only a handful of valid v4 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + def _explode_shorthand_ip_string(self): + return _compat_str(self) + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. "255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, _compat_int_types): + prefixlen = arg + else: + try: + # Check for a netmask in prefix length form + prefixlen = cls._prefix_from_prefix_string(arg) + except NetmaskValueError: + # Check for a netmask or hostmask in dotted-quad form. + # This may raise NetmaskValueError. + prefixlen = cls._prefix_from_ip_string(arg) + netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn the given IP string into an integer for comparison. + + Args: + ip_str: A string, the IP ip_str. + + Returns: + The IP ip_str as an integer. + + Raises: + AddressValueError: if ip_str isn't a valid IPv4 Address. + + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + + octets = ip_str.split('.') + if len(octets) != 4: + raise AddressValueError("Expected 4 octets in %r" % ip_str) + + try: + return _compat_int_from_byte_vals( + map(cls._parse_octet, octets), 'big') + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + + @classmethod + def _parse_octet(cls, octet_str): + """Convert a decimal octet into an integer. + + Args: + octet_str: A string, the number to parse. + + Returns: + The octet as an integer. + + Raises: + ValueError: if the octet isn't strictly a decimal from [0..255]. + + """ + if not octet_str: + raise ValueError("Empty octet not permitted") + # Whitelist the characters, since int() allows a lot of bizarre stuff. + if not cls._DECIMAL_DIGITS.issuperset(octet_str): + msg = "Only decimal digits permitted in %r" + raise ValueError(msg % octet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(octet_str) > 3: + msg = "At most 3 characters permitted in %r" + raise ValueError(msg % octet_str) + # Convert to integer (we know digits are legal) + octet_int = int(octet_str, 10) + # Any octets that look like they *might* be written in octal, + # and which don't look exactly the same in both octal and + # decimal are rejected as ambiguous + if octet_int > 7 and octet_str[0] == '0': + msg = "Ambiguous (octal/decimal) value in %r not permitted" + raise ValueError(msg % octet_str) + if octet_int > 255: + raise ValueError("Octet %d (> 255) not permitted" % octet_int) + return octet_int + + @classmethod + def _string_from_ip_int(cls, ip_int): + """Turns a 32-bit integer into dotted decimal notation. + + Args: + ip_int: An integer, the IP address. 
+ + Returns: + The IP address as a string in dotted decimal notation. + + """ + return '.'.join(_compat_str(struct.unpack(b'!B', b)[0] + if isinstance(b, bytes) + else b) + for b in _compat_to_bytes(ip_int, 4, 'big')) + + def _is_hostmask(self, ip_str): + """Test if the IP string is a hostmask (rather than a netmask). + + Args: + ip_str: A string, the potential hostmask. + + Returns: + A boolean, True if the IP string is a hostmask. + + """ + bits = ip_str.split('.') + try: + parts = [x for x in map(int, bits) if x in self._valid_mask_octets] + except ValueError: + return False + if len(parts) != len(bits): + return False + if parts[0] < parts[-1]: + return True + return False + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv4 address. + + This implements the method described in RFC1035 3.5. + + """ + reverse_octets = _compat_str(self).split('.')[::-1] + return '.'.join(reverse_octets) + '.in-addr.arpa' + + @property + def max_prefixlen(self): + return self._max_prefixlen + + @property + def version(self): + return self._version + + +class IPv4Address(_BaseV4, _BaseAddress): + + """Represent and manipulate single IPv4 Addresses.""" + + __slots__ = ('_ip', '__weakref__') + + def __init__(self, address): + + """ + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv4Address('192.0.2.1') == IPv4Address(3221225985). + or, more generally + IPv4Address(int(IPv4Address('192.0.2.1'))) == + IPv4Address('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + + """ + # Efficient constructor from integer. + if isinstance(address, _compat_int_types): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 4) + bvs = _compat_bytes_to_byte_vals(address) + self._ip = _compat_int_from_byte_vals(bvs, 'big') + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = _compat_str(address) + if '/' in addr_str: + raise AddressValueError("Unexpected '/' in %r" % address) + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v4_int_to_packed(self._ip) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within the + reserved IPv4 Network range. + + """ + return self in self._constants._reserved_network + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv4-special-registry. + + """ + return any(self in net for net in self._constants._private_networks) + + @property + def is_global(self): + return ( + self not in self._constants._public_network and + not self.is_private) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is multicast. + See RFC 3171 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 5735 3. 
+ + """ + return self == self._constants._unspecified_address + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback per RFC 3330. + + """ + return self in self._constants._loopback_network + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is link-local per RFC 3927. + + """ + return self in self._constants._linklocal_network + + +class IPv4Interface(IPv4Address): + + def __init__(self, address): + if isinstance(address, (bytes, _compat_int_types)): + IPv4Address.__init__(self, address) + self.network = IPv4Network(self._ip) + self._prefixlen = self._max_prefixlen + return + + if isinstance(address, tuple): + IPv4Address.__init__(self, address[0]) + if len(address) > 1: + self._prefixlen = int(address[1]) + else: + self._prefixlen = self._max_prefixlen + + self.network = IPv4Network(address, strict=False) + self.netmask = self.network.netmask + self.hostmask = self.network.hostmask + return + + addr = _split_optional_netmask(address) + IPv4Address.__init__(self, addr[0]) + + self.network = IPv4Network(address, strict=False) + self._prefixlen = self.network._prefixlen + + self.netmask = self.network.netmask + self.hostmask = self.network.hostmask + + def __str__(self): + return '%s/%d' % (self._string_from_ip_int(self._ip), + self.network.prefixlen) + + def __eq__(self, other): + address_equal = IPv4Address.__eq__(self, other) + if not address_equal or address_equal is NotImplemented: + return address_equal + try: + return self.network == other.network + except AttributeError: + # An interface with an associated network is NOT the + # same as an unassociated address. That's why the hash + # takes the extra info into account. + return False + + def __lt__(self, other): + address_less = IPv4Address.__lt__(self, other) + if address_less is NotImplemented: + return NotImplemented + try: + return (self.network < other.network or + self.network == other.network and address_less) + except AttributeError: + # We *do* allow addresses and interfaces to be sorted. The + # unassociated address is considered less than all interfaces. + return False + + def __hash__(self): + return self._ip ^ self._prefixlen ^ int(self.network.network_address) + + __reduce__ = _IPAddressBase.__reduce__ + + @property + def ip(self): + return IPv4Address(self._ip) + + @property + def with_prefixlen(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.hostmask) + + +class IPv4Network(_BaseV4, _BaseNetwork): + + """This class represents and manipulates 32-bit IPv4 network + addresses.. + + Attributes: [examples for IPv4Network('192.0.2.0/27')] + .network_address: IPv4Address('192.0.2.0') + .hostmask: IPv4Address('0.0.0.31') + .broadcast_address: IPv4Address('192.0.2.32') + .netmask: IPv4Address('255.255.255.224') + .prefixlen: 27 + + """ + # Class to use when creating address objects + _address_class = IPv4Address + + def __init__(self, address, strict=True): + + """Instantiate a new IPv4 network object. + + Args: + address: A string or integer representing the IP [& network]. + '192.0.2.0/24' + '192.0.2.0/255.255.255.0' + '192.0.0.2/0.0.0.255' + are all functionally the same in IPv4. 
Similarly, + '192.0.2.1' + '192.0.2.1/255.255.255.255' + '192.0.2.1/32' + are also functionally equivalent. That is to say, failing to + provide a subnetmask will create an object with a mask of /32. + + If the mask (portion after the / in the argument) is given in + dotted quad form, it is treated as a netmask if it starts with a + non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it + starts with a zero field (e.g. 0.255.255.255 == /8), with the + single exception of an all-zero mask which is treated as a + netmask == /0. If no mask is given, a default of /32 is used. + + Additionally, an integer can be passed, so + IPv4Network('192.0.2.1') == IPv4Network(3221225985) + or, more generally + IPv4Interface(int(IPv4Interface('192.0.2.1'))) == + IPv4Interface('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + NetmaskValueError: If the netmask isn't valid for + an IPv4 address. + ValueError: If strict is True and a network address is not + supplied. + + """ + _BaseNetwork.__init__(self, address) + + # Constructing from a packed address or integer + if isinstance(address, (_compat_int_types, bytes)): + self.network_address = IPv4Address(address) + self.netmask, self._prefixlen = self._make_netmask( + self._max_prefixlen) + # fixme: address/network test here. + return + + if isinstance(address, tuple): + if len(address) > 1: + arg = address[1] + else: + # We weren't given an address[1] + arg = self._max_prefixlen + self.network_address = IPv4Address(address[0]) + self.netmask, self._prefixlen = self._make_netmask(arg) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError('%s has host bits set' % self) + else: + self.network_address = IPv4Address(packed & + int(self.netmask)) + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. + addr = _split_optional_netmask(address) + self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) + + if len(addr) == 2: + arg = addr[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + + if strict: + if (IPv4Address(int(self.network_address) & int(self.netmask)) != + self.network_address): + raise ValueError('%s has host bits set' % self) + self.network_address = IPv4Address(int(self.network_address) & + int(self.netmask)) + + if self._prefixlen == (self._max_prefixlen - 1): + self.hosts = self.__iter__ + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, True if the address is not reserved per + iana-ipv4-special-registry. 
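Illustration (not part of the diff): 100.64.0.0/10 is the shared address
space carved out in the return expression just below:

    >>> IPv4Network(u'100.64.0.0/10').is_global
    False
    >>> IPv4Network(u'8.8.8.0/24').is_global
    True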
+ + """ + return (not (self.network_address in IPv4Network('100.64.0.0/10') and + self.broadcast_address in IPv4Network('100.64.0.0/10')) and + not self.is_private) + + +class _IPv4Constants(object): + + _linklocal_network = IPv4Network('169.254.0.0/16') + + _loopback_network = IPv4Network('127.0.0.0/8') + + _multicast_network = IPv4Network('224.0.0.0/4') + + _public_network = IPv4Network('100.64.0.0/10') + + _private_networks = [ + IPv4Network('0.0.0.0/8'), + IPv4Network('10.0.0.0/8'), + IPv4Network('127.0.0.0/8'), + IPv4Network('169.254.0.0/16'), + IPv4Network('172.16.0.0/12'), + IPv4Network('192.0.0.0/29'), + IPv4Network('192.0.0.170/31'), + IPv4Network('192.0.2.0/24'), + IPv4Network('192.168.0.0/16'), + IPv4Network('198.18.0.0/15'), + IPv4Network('198.51.100.0/24'), + IPv4Network('203.0.113.0/24'), + IPv4Network('240.0.0.0/4'), + IPv4Network('255.255.255.255/32'), + ] + + _reserved_network = IPv4Network('240.0.0.0/4') + + _unspecified_address = IPv4Address('0.0.0.0') + + +IPv4Address._constants = _IPv4Constants + + +class _BaseV6(object): + + """Base IPv6 object. + + The following methods are used by IPv6 objects in both single IP + addresses and networks. + + """ + + __slots__ = () + _version = 6 + _ALL_ONES = (2 ** IPV6LENGTH) - 1 + _HEXTET_COUNT = 8 + _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') + _max_prefixlen = IPV6LENGTH + + # There are only a bunch of valid v6 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. "255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, _compat_int_types): + prefixlen = arg + else: + prefixlen = cls._prefix_from_prefix_string(arg) + netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn an IPv6 ip_str into an integer. + + Args: + ip_str: A string, the IPv6 ip_str. + + Returns: + An int, the IPv6 address + + Raises: + AddressValueError: if ip_str isn't a valid IPv6 Address. + + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + + parts = ip_str.split(':') + + # An IPv6 address needs at least 2 colons (3 parts). + _min_parts = 3 + if len(parts) < _min_parts: + msg = "At least %d parts expected in %r" % (_min_parts, ip_str) + raise AddressValueError(msg) + + # If the address has an IPv4-style suffix, convert it to hexadecimal. + if '.' in parts[-1]: + try: + ipv4_int = IPv4Address(parts.pop())._ip + except AddressValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) + parts.append('%x' % (ipv4_int & 0xFFFF)) + + # An IPv6 address can't have more than 8 colons (9 parts). + # The extra colon comes from using the "::" notation for a single + # leading or trailing zero part. + _max_parts = cls._HEXTET_COUNT + 1 + if len(parts) > _max_parts: + msg = "At most %d colons permitted in %r" % ( + _max_parts - 1, ip_str) + raise AddressValueError(msg) + + # Disregarding the endpoints, find '::' with nothing in between. + # This indicates that a run of zeroes has been skipped. 
+ skip_index = None + for i in _compat_range(1, len(parts) - 1): + if not parts[i]: + if skip_index is not None: + # Can't have more than one '::' + msg = "At most one '::' permitted in %r" % ip_str + raise AddressValueError(msg) + skip_index = i + + # parts_hi is the number of parts to copy from above/before the '::' + # parts_lo is the number of parts to copy from below/after the '::' + if skip_index is not None: + # If we found a '::', then check if it also covers the endpoints. + parts_hi = skip_index + parts_lo = len(parts) - skip_index - 1 + if not parts[0]: + parts_hi -= 1 + if parts_hi: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + parts_lo -= 1 + if parts_lo: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) + if parts_skipped < 1: + msg = "Expected at most %d other parts with '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) + else: + # Otherwise, allocate the entire address to parts_hi. The + # endpoints could still be empty, but _parse_hextet() will check + # for that. + if len(parts) != cls._HEXTET_COUNT: + msg = "Exactly %d parts expected without '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) + if not parts[0]: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_hi = len(parts) + parts_lo = 0 + parts_skipped = 0 + + try: + # Now, parse the hextets into a 128-bit integer. + ip_int = 0 + for i in range(parts_hi): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + ip_int <<= 16 * parts_skipped + for i in range(-parts_lo, 0): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + return ip_int + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + + @classmethod + def _parse_hextet(cls, hextet_str): + """Convert an IPv6 hextet string into an integer. + + Args: + hextet_str: A string, the number to parse. + + Returns: + The hextet as an integer. + + Raises: + ValueError: if the input isn't strictly a hex number from + [0..FFFF]. + + """ + # Whitelist the characters, since int() allows a lot of bizarre stuff. + if not cls._HEX_DIGITS.issuperset(hextet_str): + raise ValueError("Only hex digits permitted in %r" % hextet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(hextet_str) > 4: + msg = "At most 4 characters permitted in %r" + raise ValueError(msg % hextet_str) + # Length check means we can skip checking the integer value + return int(hextet_str, 16) + + @classmethod + def _compress_hextets(cls, hextets): + """Compresses a list of hextets. + + Compresses a list of strings, replacing the longest continuous + sequence of "0" in the list with "" and adding empty strings at + the beginning or at the end of the string such that subsequently + calling ":".join(hextets) will produce the compressed version of + the IPv6 address. + + Args: + hextets: A list of strings, the hextets to compress. + + Returns: + A list of strings. 
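Illustration (not part of the diff): the empty string inserted here is what
later becomes '::' when the hextets are joined:

    >>> _BaseV6._compress_hextets(['2001', 'db8', '0', '0', '0', '0', '0', '1'])
    ['2001', 'db8', '', '1']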
+ + """ + best_doublecolon_start = -1 + best_doublecolon_len = 0 + doublecolon_start = -1 + doublecolon_len = 0 + for index, hextet in enumerate(hextets): + if hextet == '0': + doublecolon_len += 1 + if doublecolon_start == -1: + # Start of a sequence of zeros. + doublecolon_start = index + if doublecolon_len > best_doublecolon_len: + # This is the longest sequence of zeros so far. + best_doublecolon_len = doublecolon_len + best_doublecolon_start = doublecolon_start + else: + doublecolon_len = 0 + doublecolon_start = -1 + + if best_doublecolon_len > 1: + best_doublecolon_end = (best_doublecolon_start + + best_doublecolon_len) + # For zeros at the end of the address. + if best_doublecolon_end == len(hextets): + hextets += [''] + hextets[best_doublecolon_start:best_doublecolon_end] = [''] + # For zeros at the beginning of the address. + if best_doublecolon_start == 0: + hextets = [''] + hextets + + return hextets + + @classmethod + def _string_from_ip_int(cls, ip_int=None): + """Turns a 128-bit integer into hexadecimal notation. + + Args: + ip_int: An integer, the IP address. + + Returns: + A string, the hexadecimal representation of the address. + + Raises: + ValueError: The address is bigger than 128 bits of all ones. + + """ + if ip_int is None: + ip_int = int(cls._ip) + + if ip_int > cls._ALL_ONES: + raise ValueError('IPv6 address is too large') + + hex_str = '%032x' % ip_int + hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] + + hextets = cls._compress_hextets(hextets) + return ':'.join(hextets) + + def _explode_shorthand_ip_string(self): + """Expand a shortened IPv6 address. + + Args: + ip_str: A string, the IPv6 address. + + Returns: + A string, the expanded IPv6 address. + + """ + if isinstance(self, IPv6Network): + ip_str = _compat_str(self.network_address) + elif isinstance(self, IPv6Interface): + ip_str = _compat_str(self.ip) + else: + ip_str = _compat_str(self) + + ip_int = self._ip_int_from_string(ip_str) + hex_str = '%032x' % ip_int + parts = [hex_str[x:x + 4] for x in range(0, 32, 4)] + if isinstance(self, (_BaseNetwork, IPv6Interface)): + return '%s/%d' % (':'.join(parts), self._prefixlen) + return ':'.join(parts) + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv6 address. + + This implements the method described in RFC3596 2.5. + + """ + reverse_chars = self.exploded[::-1].replace(':', '') + return '.'.join(reverse_chars) + '.ip6.arpa' + + @property + def max_prefixlen(self): + return self._max_prefixlen + + @property + def version(self): + return self._version + + +class IPv6Address(_BaseV6, _BaseAddress): + + """Represent and manipulate single IPv6 Addresses.""" + + __slots__ = ('_ip', '__weakref__') + + def __init__(self, address): + """Instantiate a new IPv6 address object. + + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv6Address('2001:db8::') == + IPv6Address(42540766411282592856903984951653826560) + or, more generally + IPv6Address(int(IPv6Address('2001:db8::'))) == + IPv6Address('2001:db8::') + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + + """ + # Efficient constructor from integer. 
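+        # (Clarifying note: _compat_int_types is (int, long) on Python 2
+        # and (int,) on Python 3 in this compat module, so the integer form
+        # shown in the docstring above works on either interpreter.)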
+ if isinstance(address, _compat_int_types): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 16) + bvs = _compat_bytes_to_byte_vals(address) + self._ip = _compat_int_from_byte_vals(bvs, 'big') + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = _compat_str(address) + if '/' in addr_str: + raise AddressValueError("Unexpected '/' in %r" % address) + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v6_int_to_packed(self._ip) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + return any(self in x for x in self._constants._reserved_networks) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + return self in self._constants._linklocal_network + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. + + """ + return self in self._constants._sitelocal_network + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv6-special-registry. + + """ + return any(self in net for net in self._constants._private_networks) + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, true if the address is not reserved per + iana-ipv6-special-registry. + + """ + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + return self._ip == 0 + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + return self._ip == 1 + + @property + def ipv4_mapped(self): + """Return the IPv4 mapped address. + + Returns: + If the IPv6 address is a v4 mapped address, return the + IPv4 mapped address. Return None otherwise. + + """ + if (self._ip >> 32) != 0xFFFF: + return None + return IPv4Address(self._ip & 0xFFFFFFFF) + + @property + def teredo(self): + """Tuple of embedded teredo IPs. 
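+
+        Per RFC 4380 (clarifying note): bits 96-127 hold the 2001::/32
+        prefix, bits 64-95 the server IPv4 address, and bits 0-31 the
+        client IPv4 address in inverted form, which is why the client IP
+        below is recovered with ~self._ip.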
+ + Returns: + Tuple of the (server, client) IPs or None if the address + doesn't appear to be a teredo address (doesn't start with + 2001::/32) + + """ + if (self._ip >> 96) != 0x20010000: + return None + return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), + IPv4Address(~self._ip & 0xFFFFFFFF)) + + @property + def sixtofour(self): + """Return the IPv4 6to4 embedded address. + + Returns: + The IPv4 6to4-embedded address if present or None if the + address doesn't appear to contain a 6to4 embedded address. + + """ + if (self._ip >> 112) != 0x2002: + return None + return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) + + +class IPv6Interface(IPv6Address): + + def __init__(self, address): + if isinstance(address, (bytes, _compat_int_types)): + IPv6Address.__init__(self, address) + self.network = IPv6Network(self._ip) + self._prefixlen = self._max_prefixlen + return + if isinstance(address, tuple): + IPv6Address.__init__(self, address[0]) + if len(address) > 1: + self._prefixlen = int(address[1]) + else: + self._prefixlen = self._max_prefixlen + self.network = IPv6Network(address, strict=False) + self.netmask = self.network.netmask + self.hostmask = self.network.hostmask + return + + addr = _split_optional_netmask(address) + IPv6Address.__init__(self, addr[0]) + self.network = IPv6Network(address, strict=False) + self.netmask = self.network.netmask + self._prefixlen = self.network._prefixlen + self.hostmask = self.network.hostmask + + def __str__(self): + return '%s/%d' % (self._string_from_ip_int(self._ip), + self.network.prefixlen) + + def __eq__(self, other): + address_equal = IPv6Address.__eq__(self, other) + if not address_equal or address_equal is NotImplemented: + return address_equal + try: + return self.network == other.network + except AttributeError: + # An interface with an associated network is NOT the + # same as an unassociated address. That's why the hash + # takes the extra info into account. + return False + + def __lt__(self, other): + address_less = IPv6Address.__lt__(self, other) + if address_less is NotImplemented: + return NotImplemented + try: + return (self.network < other.network or + self.network == other.network and address_less) + except AttributeError: + # We *do* allow addresses and interfaces to be sorted. The + # unassociated address is considered less than all interfaces. + return False + + def __hash__(self): + return self._ip ^ self._prefixlen ^ int(self.network.network_address) + + __reduce__ = _IPAddressBase.__reduce__ + + @property + def ip(self): + return IPv6Address(self._ip) + + @property + def with_prefixlen(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.hostmask) + + @property + def is_unspecified(self): + return self._ip == 0 and self.network.is_unspecified + + @property + def is_loopback(self): + return self._ip == 1 and self.network.is_loopback + + +class IPv6Network(_BaseV6, _BaseNetwork): + + """This class represents and manipulates 128-bit IPv6 networks. 
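+
+    A quick containment check (illustrative)::
+
+        IPv6Address('2001:db8::1') in IPv6Network('2001:db8::/32')  # True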
+ + Attributes: [examples for IPv6('2001:db8::1000/124')] + .network_address: IPv6Address('2001:db8::1000') + .hostmask: IPv6Address('::f') + .broadcast_address: IPv6Address('2001:db8::100f') + .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') + .prefixlen: 124 + + """ + + # Class to use when creating address objects + _address_class = IPv6Address + + def __init__(self, address, strict=True): + """Instantiate a new IPv6 Network object. + + Args: + address: A string or integer representing the IPv6 network or the + IP and prefix/netmask. + '2001:db8::/128' + '2001:db8:0000:0000:0000:0000:0000:0000/128' + '2001:db8::' + are all functionally the same in IPv6. That is to say, + failing to provide a subnetmask will create an object with + a mask of /128. + + Additionally, an integer can be passed, so + IPv6Network('2001:db8::') == + IPv6Network(42540766411282592856903984951653826560) + or, more generally + IPv6Network(int(IPv6Network('2001:db8::'))) == + IPv6Network('2001:db8::') + + strict: A boolean. If true, ensure that we have been passed + A true network address, eg, 2001:db8::1000/124 and not an + IP address on a network, eg, 2001:db8::1/124. + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + NetmaskValueError: If the netmask isn't valid for + an IPv6 address. + ValueError: If strict was True and a network address was not + supplied. + + """ + _BaseNetwork.__init__(self, address) + + # Efficient constructor from integer or packed address + if isinstance(address, (bytes, _compat_int_types)): + self.network_address = IPv6Address(address) + self.netmask, self._prefixlen = self._make_netmask( + self._max_prefixlen) + return + + if isinstance(address, tuple): + if len(address) > 1: + arg = address[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + self.network_address = IPv6Address(address[0]) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError('%s has host bits set' % self) + else: + self.network_address = IPv6Address(packed & + int(self.netmask)) + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. + addr = _split_optional_netmask(address) + + self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) + + if len(addr) == 2: + arg = addr[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + + if strict: + if (IPv6Address(int(self.network_address) & int(self.netmask)) != + self.network_address): + raise ValueError('%s has host bits set' % self) + self.network_address = IPv6Address(int(self.network_address) & + int(self.netmask)) + + if self._prefixlen == (self._max_prefixlen - 1): + self.hosts = self.__iter__ + + def hosts(self): + """Generate Iterator over usable hosts in a network. + + This is like __iter__ except it doesn't return the + Subnet-Router anycast address. + + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network + 1, broadcast + 1): + yield self._address_class(x) + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. 
+ + """ + return (self.network_address.is_site_local and + self.broadcast_address.is_site_local) + + +class _IPv6Constants(object): + + _linklocal_network = IPv6Network('fe80::/10') + + _multicast_network = IPv6Network('ff00::/8') + + _private_networks = [ + IPv6Network('::1/128'), + IPv6Network('::/128'), + IPv6Network('::ffff:0:0/96'), + IPv6Network('100::/64'), + IPv6Network('2001::/23'), + IPv6Network('2001:2::/48'), + IPv6Network('2001:db8::/32'), + IPv6Network('2001:10::/28'), + IPv6Network('fc00::/7'), + IPv6Network('fe80::/10'), + ] + + _reserved_networks = [ + IPv6Network('::/8'), IPv6Network('100::/8'), + IPv6Network('200::/7'), IPv6Network('400::/6'), + IPv6Network('800::/5'), IPv6Network('1000::/4'), + IPv6Network('4000::/3'), IPv6Network('6000::/3'), + IPv6Network('8000::/3'), IPv6Network('A000::/3'), + IPv6Network('C000::/3'), IPv6Network('E000::/4'), + IPv6Network('F000::/5'), IPv6Network('F800::/6'), + IPv6Network('FE00::/9'), + ] + + _sitelocal_network = IPv6Network('fec0::/10') + + +IPv6Address._constants = _IPv6Constants diff --git a/test/support/integration/plugins/module_utils/net_tools/__init__.py b/test/support/integration/plugins/module_utils/net_tools/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/support/integration/plugins/module_utils/net_tools/__init__.py diff --git a/test/support/integration/plugins/module_utils/network/__init__.py b/test/support/integration/plugins/module_utils/network/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/support/integration/plugins/module_utils/network/__init__.py diff --git a/test/support/integration/plugins/module_utils/network/common/__init__.py b/test/support/integration/plugins/module_utils/network/common/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/support/integration/plugins/module_utils/network/common/__init__.py diff --git a/test/support/integration/plugins/module_utils/network/common/utils.py b/test/support/integration/plugins/module_utils/network/common/utils.py new file mode 100644 index 0000000..8031738 --- /dev/null +++ b/test/support/integration/plugins/module_utils/network/common/utils.py @@ -0,0 +1,643 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Networking tools for network modules only
+
+import re
+import ast
+import operator
+import socket
+import json
+
+from itertools import chain
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils import basic
+from ansible.module_utils.parsing.convert_bool import boolean
+
+# Backwards compatibility for 3rd party modules
+# TODO(pabelanger): With move to ansible.netcommon, we should clean this code
+# up and have modules import directly themselves.
+from ansible.module_utils.common.network import (  # noqa: F401
+    to_bits, is_netmask, is_masklen, to_netmask, to_masklen, to_subnet, to_ipv6_network, VALID_MASKS
+)
+
+try:
+    from jinja2 import Environment, StrictUndefined
+    from jinja2.exceptions import UndefinedError
+    HAS_JINJA2 = True
+except ImportError:
+    HAS_JINJA2 = False
+
+
+OPERATORS = frozenset(['ge', 'gt', 'eq', 'neq', 'lt', 'le'])
+ALIASES = frozenset([('min', 'ge'), ('max', 'le'), ('exactly', 'eq'), ('neq', 'ne')])
+
+
+def to_list(val):
+    if isinstance(val, (list, tuple, set)):
+        return list(val)
+    elif val is not None:
+        return [val]
+    else:
+        return list()
+
+
+def to_lines(stdout):
+    for item in stdout:
+        if isinstance(item, string_types):
+            item = to_text(item).split('\n')
+        yield item
+
+
+def transform_commands(module):
+    transform = ComplexList(dict(
+        command=dict(key=True),
+        output=dict(),
+        prompt=dict(type='list'),
+        answer=dict(type='list'),
+        newline=dict(type='bool', default=True),
+        sendonly=dict(type='bool', default=False),
+        check_all=dict(type='bool', default=False),
+    ), module)
+
+    return transform(module.params['commands'])
+
+
+def sort_list(val):
+    if isinstance(val, list):
+        return sorted(val)
+    return val
+
+
+class Entity(object):
+    """Transforms a dict with an argument spec
+
+    This class will take a dict and apply an Ansible argument spec to the
+    values. The resulting dict will contain all of the keys in the param
+    with appropriate values set. 
+ + Example:: + + argument_spec = dict( + command=dict(key=True), + display=dict(default='text', choices=['text', 'json']), + validate=dict(type='bool') + ) + transform = Entity(module, argument_spec) + value = dict(command='foo') + result = transform(value) + print result + {'command': 'foo', 'display': 'text', 'validate': None} + + Supported argument spec: + * key - specifies how to map a single value to a dict + * read_from - read and apply the argument_spec from the module + * required - a value is required + * type - type of value (uses AnsibleModule type checker) + * fallback - implements fallback function + * choices - set of valid options + * default - default value + """ + + def __init__(self, module, attrs=None, args=None, keys=None, from_argspec=False): + args = [] if args is None else args + + self._attributes = attrs or {} + self._module = module + + for arg in args: + self._attributes[arg] = dict() + if from_argspec: + self._attributes[arg]['read_from'] = arg + if keys and arg in keys: + self._attributes[arg]['key'] = True + + self.attr_names = frozenset(self._attributes.keys()) + + _has_key = False + + for name, attr in iteritems(self._attributes): + if attr.get('read_from'): + if attr['read_from'] not in self._module.argument_spec: + module.fail_json(msg='argument %s does not exist' % attr['read_from']) + spec = self._module.argument_spec.get(attr['read_from']) + for key, value in iteritems(spec): + if key not in attr: + attr[key] = value + + if attr.get('key'): + if _has_key: + module.fail_json(msg='only one key value can be specified') + _has_key = True + attr['required'] = True + + def serialize(self): + return self._attributes + + def to_dict(self, value): + obj = {} + for name, attr in iteritems(self._attributes): + if attr.get('key'): + obj[name] = value + else: + obj[name] = attr.get('default') + return obj + + def __call__(self, value, strict=True): + if not isinstance(value, dict): + value = self.to_dict(value) + + if strict: + unknown = set(value).difference(self.attr_names) + if unknown: + self._module.fail_json(msg='invalid keys: %s' % ','.join(unknown)) + + for name, attr in iteritems(self._attributes): + if value.get(name) is None: + value[name] = attr.get('default') + + if attr.get('fallback') and not value.get(name): + fallback = attr.get('fallback', (None,)) + fallback_strategy = fallback[0] + fallback_args = [] + fallback_kwargs = {} + if fallback_strategy is not None: + for item in fallback[1:]: + if isinstance(item, dict): + fallback_kwargs = item + else: + fallback_args = item + try: + value[name] = fallback_strategy(*fallback_args, **fallback_kwargs) + except basic.AnsibleFallbackNotFound: + continue + + if attr.get('required') and value.get(name) is None: + self._module.fail_json(msg='missing required attribute %s' % name) + + if 'choices' in attr: + if value[name] not in attr['choices']: + self._module.fail_json(msg='%s must be one of %s, got %s' % (name, ', '.join(attr['choices']), value[name])) + + if value[name] is not None: + value_type = attr.get('type', 'str') + type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type] + type_checker(value[name]) + elif value.get(name): + value[name] = self._module.params[name] + + return value + + +class EntityCollection(Entity): + """Extends ```Entity``` to handle a list of dicts """ + + def __call__(self, iterable, strict=True): + if iterable is None: + iterable = [super(EntityCollection, self).__call__(self._module.params, strict)] + + if not isinstance(iterable, (list, tuple)): + 
self._module.fail_json(msg='value must be an iterable')
+
+        return [(super(EntityCollection, self).__call__(i, strict)) for i in iterable]
+
+
+# these two are for backwards compatibility and can be removed once all of the
+# modules that use them are updated
+class ComplexDict(Entity):
+    def __init__(self, attrs, module, *args, **kwargs):
+        super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
+
+
+class ComplexList(EntityCollection):
+    def __init__(self, attrs, module, *args, **kwargs):
+        super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
+
+
+def dict_diff(base, comparable):
+    """ Generate a dict object of differences
+
+    This function will compare two dict objects and return the difference
+    between them as a dict object. For scalar values, the key will reflect
+    the updated value. If the key does not exist in `comparable`, then no
+    key will be returned. For lists, the value in comparable will wholly replace
+    the value in base for the key. For dicts, the returned value will contain
+    only the keys that differ.
+
+    :param base: dict object to base the diff on
+    :param comparable: dict object to compare against base
+
+    :returns: new dict object with differences
+    """
+    if not isinstance(base, dict):
+        raise AssertionError("`base` must be of type <dict>")
+    if not isinstance(comparable, dict):
+        if comparable is None:
+            comparable = dict()
+        else:
+            raise AssertionError("`comparable` must be of type <dict>")
+
+    updates = dict()
+
+    for key, value in iteritems(base):
+        if isinstance(value, dict):
+            item = comparable.get(key)
+            if item is not None:
+                sub_diff = dict_diff(value, comparable[key])
+                if sub_diff:
+                    updates[key] = sub_diff
+        else:
+            comparable_value = comparable.get(key)
+            if comparable_value is not None:
+                if sort_list(base[key]) != sort_list(comparable_value):
+                    updates[key] = comparable_value
+
+    for key in set(comparable.keys()).difference(base.keys()):
+        updates[key] = comparable.get(key)
+
+    return updates
+
+
+def dict_merge(base, other):
+    """ Return a new dict object that combines base and other
+
+    This will create a new dict object that is a combination of the key/value
+    pairs from base and other. Where a key exists in both, the value will be
+    selected from other. If the value is a list object, the two lists will
+    be combined and duplicate entries removed. 
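+
+    Illustrative example (added note; list ordering may vary)::
+
+        dict_merge({'a': 1, 'b': [1, 2]}, {'a': {'x': 1}, 'b': [2, 3]})
+        # -> {'a': {'x': 1}, 'b': [1, 2, 3]}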
+ + :param base: dict object to serve as base + :param other: dict object to combine with base + + :returns: new combined dict object + """ + if not isinstance(base, dict): + raise AssertionError("`base` must be of type <dict>") + if not isinstance(other, dict): + raise AssertionError("`other` must be of type <dict>") + + combined = dict() + + for key, value in iteritems(base): + if isinstance(value, dict): + if key in other: + item = other.get(key) + if item is not None: + if isinstance(other[key], Mapping): + combined[key] = dict_merge(value, other[key]) + else: + combined[key] = other[key] + else: + combined[key] = item + else: + combined[key] = value + elif isinstance(value, list): + if key in other: + item = other.get(key) + if item is not None: + try: + combined[key] = list(set(chain(value, item))) + except TypeError: + value.extend([i for i in item if i not in value]) + combined[key] = value + else: + combined[key] = item + else: + combined[key] = value + else: + if key in other: + other_value = other.get(key) + if other_value is not None: + if sort_list(base[key]) != sort_list(other_value): + combined[key] = other_value + else: + combined[key] = value + else: + combined[key] = other_value + else: + combined[key] = value + + for key in set(other.keys()).difference(base.keys()): + combined[key] = other.get(key) + + return combined + + +def param_list_to_dict(param_list, unique_key="name", remove_key=True): + """Rotates a list of dictionaries to be a dictionary of dictionaries. + + :param param_list: The aforementioned list of dictionaries + :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value + behind this key will be the key each dictionary can be found at in the new root dictionary + :param remove_key: If True, remove unique_key from the individual dictionaries before returning. + """ + param_dict = {} + for params in param_list: + params = params.copy() + if remove_key: + name = params.pop(unique_key) + else: + name = params.get(unique_key) + param_dict[name] = params + + return param_dict + + +def conditional(expr, val, cast=None): + match = re.match(r'^(.+)\((.+)\)$', str(expr), re.I) + if match: + op, arg = match.groups() + else: + op = 'eq' + if ' ' in str(expr): + raise AssertionError('invalid expression: cannot contain spaces') + arg = expr + + if cast is None and val is not None: + arg = type(val)(arg) + elif callable(cast): + arg = cast(arg) + val = cast(val) + + op = next((oper for alias, oper in ALIASES if op == alias), op) + + if not hasattr(operator, op) and op not in OPERATORS: + raise ValueError('unknown operator: %s' % op) + + func = getattr(operator, op) + return func(val, arg) + + +def ternary(value, true_val, false_val): + ''' value ? 
true_val : false_val ''' + if value: + return true_val + else: + return false_val + + +def remove_default_spec(spec): + for item in spec: + if 'default' in spec[item]: + del spec[item]['default'] + + +def validate_ip_address(address): + try: + socket.inet_aton(address) + except socket.error: + return False + return address.count('.') == 3 + + +def validate_ip_v6_address(address): + try: + socket.inet_pton(socket.AF_INET6, address) + except socket.error: + return False + return True + + +def validate_prefix(prefix): + if prefix and not 0 <= int(prefix) <= 32: + return False + return True + + +def load_provider(spec, args): + provider = args.get('provider') or {} + for key, value in iteritems(spec): + if key not in provider: + if 'fallback' in value: + provider[key] = _fallback(value['fallback']) + elif 'default' in value: + provider[key] = value['default'] + else: + provider[key] = None + if 'authorize' in provider: + # Coerce authorize to provider if a string has somehow snuck in. + provider['authorize'] = boolean(provider['authorize'] or False) + args['provider'] = provider + return provider + + +def _fallback(fallback): + strategy = fallback[0] + args = [] + kwargs = {} + + for item in fallback[1:]: + if isinstance(item, dict): + kwargs = item + else: + args = item + try: + return strategy(*args, **kwargs) + except basic.AnsibleFallbackNotFound: + pass + + +def generate_dict(spec): + """ + Generate dictionary which is in sync with argspec + + :param spec: A dictionary that is the argspec of the module + :rtype: A dictionary + :returns: A dictionary in sync with argspec with default value + """ + obj = {} + if not spec: + return obj + + for key, val in iteritems(spec): + if 'default' in val: + dct = {key: val['default']} + elif 'type' in val and val['type'] == 'dict': + dct = {key: generate_dict(val['options'])} + else: + dct = {key: None} + obj.update(dct) + return obj + + +def parse_conf_arg(cfg, arg): + """ + Parse config based on argument + + :param cfg: A text string which is a line of configuration. + :param arg: A text string which is to be matched. + :rtype: A text string + :returns: A text string if match is found + """ + match = re.search(r'%s (.+)(\n|$)' % arg, cfg, re.M) + if match: + result = match.group(1).strip() + else: + result = None + return result + + +def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str='no'): + """ + Parse config based on command + + :param cfg: A text string which is a line of configuration. + :param cmd: A text string which is the command to be matched + :param res1: A text string to be returned if the command is present + :param res2: A text string to be returned if the negate command + is present + :param delete_str: A text string to identify the start of the + negate command + :rtype: A text string + :returns: A text string if match is found + """ + match = re.search(r'\n\s+%s(\n|$)' % cmd, cfg) + if match: + return res1 + if res2 is not None: + match = re.search(r'\n\s+%s %s(\n|$)' % (delete_str, cmd), cfg) + if match: + return res2 + return None + + +def get_xml_conf_arg(cfg, path, data='text'): + """ + :param cfg: The top level configuration lxml Element tree object + :param path: The relative xpath w.r.t to top level element (cfg) + to be searched in the xml hierarchy + :param data: The type of data to be returned for the matched xml node. + Valid values are text, tag, attrib, with default as text. 
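+
+        Illustrative example (assuming an lxml element parsed from
+        '<config><interface><name>eth0</name></interface></config>')::
+
+            get_xml_conf_arg(cfg, 'interface/name')  # -> 'eth0'
+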
+ :return: Returns the required type for the matched xml node or else None + """ + match = cfg.xpath(path) + if len(match): + if data == 'tag': + result = getattr(match[0], 'tag') + elif data == 'attrib': + result = getattr(match[0], 'attrib') + else: + result = getattr(match[0], 'text') + else: + result = None + return result + + +def remove_empties(cfg_dict): + """ + Generate final config dictionary + + :param cfg_dict: A dictionary parsed in the facts system + :rtype: A dictionary + :returns: A dictionary by eliminating keys that have null values + """ + final_cfg = {} + if not cfg_dict: + return final_cfg + + for key, val in iteritems(cfg_dict): + dct = None + if isinstance(val, dict): + child_val = remove_empties(val) + if child_val: + dct = {key: child_val} + elif (isinstance(val, list) and val + and all([isinstance(x, dict) for x in val])): + child_val = [remove_empties(x) for x in val] + if child_val: + dct = {key: child_val} + elif val not in [None, [], {}, (), '']: + dct = {key: val} + if dct: + final_cfg.update(dct) + return final_cfg + + +def validate_config(spec, data): + """ + Validate if the input data against the AnsibleModule spec format + :param spec: Ansible argument spec + :param data: Data to be validated + :return: + """ + params = basic._ANSIBLE_ARGS + basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': data})) + validated_data = basic.AnsibleModule(spec).params + basic._ANSIBLE_ARGS = params + return validated_data + + +def search_obj_in_list(name, lst, key='name'): + if not lst: + return None + else: + for item in lst: + if item.get(key) == name: + return item + + +class Template: + + def __init__(self): + if not HAS_JINJA2: + raise ImportError("jinja2 is required but does not appear to be installed. " + "It can be installed using `pip install jinja2`") + + self.env = Environment(undefined=StrictUndefined) + self.env.filters.update({'ternary': ternary}) + + def __call__(self, value, variables=None, fail_on_undefined=True): + variables = variables or {} + + if not self.contains_vars(value): + return value + + try: + value = self.env.from_string(value).render(variables) + except UndefinedError: + if not fail_on_undefined: + return None + raise + + if value: + try: + return ast.literal_eval(value) + except Exception: + return str(value) + else: + return None + + def contains_vars(self, data): + if isinstance(data, string_types): + for marker in (self.env.block_start_string, self.env.variable_start_string, self.env.comment_start_string): + if marker in data: + return True + return False diff --git a/test/support/integration/plugins/modules/htpasswd.py b/test/support/integration/plugins/modules/htpasswd.py new file mode 100644 index 0000000..2c55a6b --- /dev/null +++ b/test/support/integration/plugins/modules/htpasswd.py @@ -0,0 +1,275 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Nimbis Services, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +module: htpasswd +version_added: "1.3" +short_description: manage user files for basic authentication +description: + - Add and remove username/password entries in a password file using htpasswd. + - This is used by web servers such as Apache and Nginx for basic authentication. 
+options: + path: + required: true + aliases: [ dest, destfile ] + description: + - Path to the file that contains the usernames and passwords + name: + required: true + aliases: [ username ] + description: + - User name to add or remove + password: + required: false + description: + - Password associated with user. + - Must be specified if user does not exist yet. + crypt_scheme: + required: false + choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + default: "apr_md5_crypt" + description: + - Encryption scheme to be used. As well as the four choices listed + here, you can also use any other hash supported by passlib, such as + md5_crypt and sha256_crypt, which are linux passwd hashes. If you + do so the password file will not be compatible with Apache or Nginx + state: + required: false + choices: [ present, absent ] + default: "present" + description: + - Whether the user entry should be present or not + create: + required: false + type: bool + default: "yes" + description: + - Used with C(state=present). If specified, the file will be created + if it does not already exist. If set to "no", will fail if the + file does not exist +notes: + - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems." + - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." + - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." +requirements: [ passlib>=1.6 ] +author: "Ansible Core Team" +extends_documentation_fragment: files +""" + +EXAMPLES = """ +# Add a user to a password file and ensure permissions are set +- htpasswd: + path: /etc/nginx/passwdfile + name: janedoe + password: '9s36?;fyNp' + owner: root + group: www-data + mode: 0640 + +# Remove a user from a password file +- htpasswd: + path: /etc/apache2/passwdfile + name: foobar + state: absent + +# Add a user to a password file suitable for use by libpam-pwdfile +- htpasswd: + path: /etc/mail/passwords + name: alex + password: oedu2eGh + crypt_scheme: md5_crypt +""" + + +import os +import tempfile +import traceback +from ansible.module_utils.compat.version import LooseVersion +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +PASSLIB_IMP_ERR = None +try: + from passlib.apache import HtpasswdFile, htpasswd_context + from passlib.context import CryptContext + import passlib +except ImportError: + PASSLIB_IMP_ERR = traceback.format_exc() + passlib_installed = False +else: + passlib_installed = True + +apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + + +def create_missing_directories(dest): + destpath = os.path.dirname(dest) + if not os.path.exists(destpath): + os.makedirs(destpath) + + +def present(dest, username, password, crypt_scheme, create, check_mode): + """ Ensures user is present + + Returns (msg, changed) """ + if crypt_scheme in apache_hashes: + context = htpasswd_context + else: + context = CryptContext(schemes=[crypt_scheme] + apache_hashes) + if not os.path.exists(dest): + if not create: + raise ValueError('Destination %s does not exist' % dest) + if check_mode: + return ("Create %s" % dest, True) + create_missing_directories(dest) + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + 
ht.update(username, password) + ht.save() + return ("Created %s and added %s" % (dest, username), True) + else: + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, default=crypt_scheme, context=context) + + found = None + if getattr(ht, 'check_password', None): + found = ht.check_password(username, password) + else: + found = ht.verify(username, password) + + if found: + return ("%s already present" % username, False) + else: + if not check_mode: + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + ht.update(username, password) + ht.save() + return ("Add/update %s" % username, True) + + +def absent(dest, username, check_mode): + """ Ensures user is absent + + Returns (msg, changed) """ + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False) + else: + ht = HtpasswdFile(dest) + + if username not in ht.users(): + return ("%s not present" % username, False) + else: + if not check_mode: + ht.delete(username) + ht.save() + return ("Remove %s" % username, True) + + +def check_file_attrs(module, changed, message): + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + + if changed: + message += " and " + changed = True + message += "ownership, perms or SE linux context changed" + + return message, changed + + +def main(): + arg_spec = dict( + path=dict(required=True, aliases=["dest", "destfile"]), + name=dict(required=True, aliases=["username"]), + password=dict(required=False, default=None, no_log=True), + crypt_scheme=dict(required=False, default="apr_md5_crypt"), + state=dict(required=False, default="present"), + create=dict(type='bool', default='yes'), + + ) + module = AnsibleModule(argument_spec=arg_spec, + add_file_common_args=True, + supports_check_mode=True) + + path = module.params['path'] + username = module.params['name'] + password = module.params['password'] + crypt_scheme = module.params['crypt_scheme'] + state = module.params['state'] + create = module.params['create'] + check_mode = module.check_mode + + if not passlib_installed: + module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR) + + # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. 
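+    # (Explanatory note, inferred from the error text rather than the
+    # passlib sources: a blank line yields too few ':'-separated fields
+    # when the file is parsed per line, producing the unpacking error
+    # quoted above, hence the stripping pass below.)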
+    try:
+        f = open(path, "r")
+    except IOError:
+        # No preexisting file to remove blank lines from
+        f = None
+    else:
+        try:
+            lines = f.readlines()
+        finally:
+            f.close()
+
+        # If the file gets edited, it returns true, so only edit the file if it has blank lines
+        strip = False
+        for line in lines:
+            if not line.strip():
+                strip = True
+                break
+
+        if strip:
+            # If check mode, create a temporary file
+            if check_mode:
+                temp = tempfile.NamedTemporaryFile()
+                path = temp.name
+            f = open(path, "w")
+            try:
+                [f.write(line) for line in lines if line.strip()]
+            finally:
+                f.close()
+
+    try:
+        if state == 'present':
+            (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
+        elif state == 'absent':
+            if not os.path.exists(path):
+                module.exit_json(msg="%s not present" % username,
+                                 warnings="%s does not exist" % path, changed=False)
+            (msg, changed) = absent(path, username, check_mode)
+        else:
+            module.fail_json(msg="Invalid state: %s" % state)
+
+        (msg, changed) = check_file_attrs(module, changed, msg)
+        module.exit_json(msg=msg, changed=changed)
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/pkgng.py b/test/support/integration/plugins/modules/pkgng.py
new file mode 100644
index 0000000..1136347
--- /dev/null
+++ b/test/support/integration/plugins/modules/pkgng.py
@@ -0,0 +1,406 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, bleader
+# Written by bleader <bleader@ratonland.org>
+# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+    - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0.
+version_added: "1.2"
+options:
+    name:
+        description:
+            - Name or list of names of packages to install/remove.
+        required: true
+    state:
+        description:
+            - State of the package.
+            - 'Note: "latest" added in 2.7'
+        choices: [ 'present', 'latest', 'absent' ]
+        required: false
+        default: present
+    cached:
+        description:
+            - Use local package base instead of fetching an updated one.
+        type: bool
+        required: false
+        default: no
+    annotation:
+        description:
+            - A comma-separated list of key/value pairs of the form
+              C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
+              C(-) denotes removing an annotation, and C(:) denotes modifying an
+              annotation.
+              If setting or modifying annotations, a value must be provided.
+        required: false
+        version_added: "1.6"
+    pkgsite:
+        description:
+            - For pkgng versions before 1.1.4, specify packagesite to use
+              for downloading packages. If not specified, use settings from
+              C(/usr/local/etc/pkg.conf).
+            - For newer pkgng versions, specify the name of a repository
+              configured in C(/usr/local/etc/pkg/repos).
+        required: false
+    rootdir:
+        description:
+            - For pkgng versions 1.5 and later, pkg will install all packages
+              within the specified root directory. 
+ - Can not be used together with I(chroot) or I(jail) options. + required: false + chroot: + version_added: "2.1" + description: + - Pkg will chroot in the specified environment. + - Can not be used together with I(rootdir) or I(jail) options. + required: false + jail: + version_added: "2.4" + description: + - Pkg will execute in the given jail name or id. + - Can not be used together with I(chroot) or I(rootdir) options. + autoremove: + version_added: "2.2" + description: + - Remove automatically installed packages which are no longer needed. + required: false + type: bool + default: no +author: "bleader (@bleader)" +notes: + - When using pkgsite, be careful that already in cache packages won't be downloaded again. + - When used with a `loop:` each package will be processed individually, + it is much more efficient to pass the list directly to the `name` option. +''' + +EXAMPLES = ''' +- name: Install package foo + pkgng: + name: foo + state: present + +- name: Annotate package foo and bar + pkgng: + name: foo,bar + annotation: '+test1=baz,-test2,:test3=foobar' + +- name: Remove packages foo and bar + pkgng: + name: foo,bar + state: absent + +# "latest" support added in 2.7 +- name: Upgrade package baz + pkgng: + name: baz + state: latest +''' + + +import re +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, pkgng_path, name, dir_arg): + + rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name)) + + if rc == 0: + return True + + return False + + +def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite): + + # Check to see if a package upgrade is available. + # rc = 0, no updates available or package not installed + # rc = 1, updates available + if old_pkgng: + rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name)) + else: + rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name)) + + if rc == 1: + return True + + return False + + +def pkgng_older_than(module, pkgng_path, compare_version): + + rc, out, err = module.run_command("%s -v" % pkgng_path) + version = [int(x) for x in re.split(r'[\._]', out)] + + i = 0 + new_pkgng = True + while compare_version[i] == version[i]: + i += 1 + if i == min(len(compare_version), len(version)): + break + else: + if compare_version[i] > version[i]: + new_pkgng = False + return not new_pkgng + + +def remove_packages(module, pkgng_path, packages, dir_arg): + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, pkgng_path, package, dir_arg): + continue + + if not module.check_mode: + rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package)) + + if not module.check_mode and query_package(module, pkgng_path, package, dir_arg): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + return (True, "removed %s package(s)" % remove_c) + + return (False, "package(s) already absent") + + +def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state): + + install_c = 0 + + # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions + # in /usr/local/etc/pkg/repos + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4]) + if pkgsite != "": + if old_pkgng: + pkgsite = "PACKAGESITE=%s" % (pkgsite) + else: + pkgsite = "-r %s" % 
(pkgsite) + + # This environment variable skips mid-install prompts, + # setting them to their default values. + batch_var = 'env BATCH=yes' + + if not module.check_mode and not cached: + if old_pkgng: + rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) + else: + rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg)) + if rc != 0: + module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err)) + + for package in packages: + already_installed = query_package(module, pkgng_path, package, dir_arg) + if already_installed and state == "present": + continue + + update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite) + if not update_available and already_installed and state == "latest": + continue + + if not module.check_mode: + if already_installed: + action = "upgrade" + else: + action = "install" + if old_pkgng: + rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package)) + else: + rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package)) + + if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg): + module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stderr=err) + + install_c += 1 + + if install_c > 0: + return (True, "added %s package(s)" % (install_c)) + + return (False, "package(s) already %s" % (state)) + + +def annotation_query(module, pkgng_path, package, tag, dir_arg): + rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package)) + match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE) + if match: + return match.group('value') + return False + + +def annotation_add(module, pkgng_path, package, tag, value, dir_arg): + _value = annotation_query(module, pkgng_path, package, tag, dir_arg) + if not _value: + # Annotation does not exist, add it. 
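+        # The assembled command has the form (illustrative):
+        #   <pkg> [--rootdir|--chroot|--jail ...] annotate -y -A <package> <tag> "<value>"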
+        rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+                                          % (pkgng_path, dir_arg, package, tag, value))
+        if rc != 0:
+            module.fail_json(msg="could not annotate %s: %s"
+                             % (package, out), stderr=err)
+        return True
+    elif _value != value:
+        # Annotation exists, but value differs
+        module.fail_json(
+            msg="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+            % (package, tag, _value, value))
+        return False
+    else:
+        # Annotation exists, nothing to do
+        return False
+
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+    _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+    if _value:
+        rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+                                          % (pkgng_path, dir_arg, package, tag))
+        if rc != 0:
+            module.fail_json(msg="could not delete annotation from %s: %s"
+                             % (package, out), stderr=err)
+        return True
+    return False
+
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+    _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+    if not _value:
+        # No such tag
+        module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
+                         % (package, tag))
+    elif _value == value:
+        # No change in value
+        return False
+    else:
+        rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+                                          % (pkgng_path, dir_arg, package, tag, value))
+        if rc != 0:
+            module.fail_json(msg="could not change annotation to %s: %s"
+                             % (package, out), stderr=err)
+        return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+    annotate_c = 0
+    # list() so the parsed annotations survive being iterated once per
+    # package (a bare map() would be exhausted after the first package
+    # on Python 3).
+    annotations = list(map(lambda _annotation:
+                           re.match(r'(?P<operation>[+\-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+                                    _annotation).groupdict(),
+                           re.split(r',', annotation)))
+
+    operation = {
+        '+': annotation_add,
+        '-': annotation_delete,
+        ':': annotation_modify
+    }
+
+    for package in packages:
+        for _annotation in annotations:
+            if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
+                annotate_c += 1
+
+    if annotate_c > 0:
+        return (True, "added %s annotations." 
% annotate_c) + return (False, "changed no annotations") + + +def autoremove_packages(module, pkgng_path, dir_arg): + rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg)) + + autoremove_c = 0 + + match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE) + if match: + autoremove_c = int(match.group(1)) + + if autoremove_c == 0: + return False, "no package(s) to autoremove" + + if not module.check_mode: + rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg)) + + return True, "autoremoved %d package(s)" % (autoremove_c) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "latest", "absent"], required=False), + name=dict(aliases=["pkg"], required=True, type='list'), + cached=dict(default=False, type='bool'), + annotation=dict(default="", required=False), + pkgsite=dict(default="", required=False), + rootdir=dict(default="", required=False, type='path'), + chroot=dict(default="", required=False, type='path'), + jail=dict(default="", required=False, type='str'), + autoremove=dict(default=False, type='bool')), + supports_check_mode=True, + mutually_exclusive=[["rootdir", "chroot", "jail"]]) + + pkgng_path = module.get_bin_path('pkg', True) + + p = module.params + + pkgs = p["name"] + + changed = False + msgs = [] + dir_arg = "" + + if p["rootdir"] != "": + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0]) + if old_pkgng: + module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater") + else: + dir_arg = "--rootdir %s" % (p["rootdir"]) + + if p["chroot"] != "": + dir_arg = '--chroot %s' % (p["chroot"]) + + if p["jail"] != "": + dir_arg = '--jail %s' % (p["jail"]) + + if p["state"] in ("present", "latest"): + _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg, p["state"]) + changed = changed or _changed + msgs.append(_msg) + + elif p["state"] == "absent": + _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg) + changed = changed or _changed + msgs.append(_msg) + + if p["autoremove"]: + _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg) + changed = changed or _changed + msgs.append(_msg) + + if p["annotation"]: + _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg) + changed = changed or _changed + msgs.append(_msg) + + module.exit_json(changed=changed, msg=", ".join(msgs)) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/sefcontext.py b/test/support/integration/plugins/modules/sefcontext.py new file mode 100644 index 0000000..5574abc --- /dev/null +++ b/test/support/integration/plugins/modules/sefcontext.py @@ -0,0 +1,310 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: sefcontext +short_description: Manages SELinux file context mapping definitions +description: +- Manages SELinux file context mapping definitions. +- Similar to the C(semanage fcontext) command. +version_added: '2.2' +options: + target: + description: + - Target path (expression). 
+ type: str + required: yes + aliases: [ path ] + ftype: + description: + - The file type that should have SELinux contexts applied. + - "The following file type options are available:" + - C(a) for all files, + - C(b) for block devices, + - C(c) for character devices, + - C(d) for directories, + - C(f) for regular files, + - C(l) for symbolic links, + - C(p) for named pipes, + - C(s) for socket files. + type: str + choices: [ a, b, c, d, f, l, p, s ] + default: a + setype: + description: + - SELinux type for the specified target. + type: str + required: yes + seuser: + description: + - SELinux user for the specified target. + type: str + selevel: + description: + - SELinux range for the specified target. + type: str + aliases: [ serange ] + state: + description: + - Whether the SELinux file context must be C(absent) or C(present). + type: str + choices: [ absent, present ] + default: present + reload: + description: + - Reload SELinux policy after commit. + - Note that this does not apply SELinux file contexts to existing files. + type: bool + default: yes + ignore_selinux_state: + description: + - Useful for scenarios (chrooted environment) that you can't get the real SELinux state. + type: bool + default: no + version_added: '2.8' +notes: +- The changes are persistent across reboots. +- The M(sefcontext) module does not modify existing files to the new + SELinux context(s), so it is advisable to first create the SELinux + file contexts before creating files, or run C(restorecon) manually + for the existing files that require the new SELinux file contexts. +- Not applying SELinux fcontexts to existing files is a deliberate + decision as it would be unclear what reported changes would entail + to, and there's no guarantee that applying SELinux fcontext does + not pick up other unrelated prior changes. +requirements: +- libselinux-python +- policycoreutils-python +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- name: Allow apache to modify files in /srv/git_repos + sefcontext: + target: '/srv/git_repos(/.*)?' 
+ setype: httpd_git_rw_content_t + state: present + +- name: Apply new SELinux file context to filesystem + command: restorecon -irv /srv/git_repos +''' + +RETURN = r''' +# Default return values +''' + +import os +import subprocess +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module +from ansible.module_utils._text import to_native + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +# Add missing entries (backward compatible) +if HAVE_SEOBJECT: + seobject.file_types.update( + a=seobject.SEMANAGE_FCONTEXT_ALL, + b=seobject.SEMANAGE_FCONTEXT_BLOCK, + c=seobject.SEMANAGE_FCONTEXT_CHAR, + d=seobject.SEMANAGE_FCONTEXT_DIR, + f=seobject.SEMANAGE_FCONTEXT_REG, + l=seobject.SEMANAGE_FCONTEXT_LINK, + p=seobject.SEMANAGE_FCONTEXT_PIPE, + s=seobject.SEMANAGE_FCONTEXT_SOCK, + ) + +# Make backward compatible +option_to_file_type_str = dict( + a='all files', + b='block device', + c='character device', + d='directory', + f='regular file', + l='symbolic link', + p='named pipe', + s='socket', +) + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def semanage_fcontext_exists(sefcontext, target, ftype): + ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' + + # Beware that records comprise of a string representation of the file_type + record = (target, option_to_file_type_str[ftype]) + records = sefcontext.get_all() + try: + return records[record] + except KeyError: + return None + + +def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''): + ''' Add or modify SELinux file context mapping definition to the policy. 
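+
+    Record lookup sketch (shapes are illustrative assumptions based on
+    seobject's fcontextRecords, not output captured from this module):
+        key   = ('/srv/git_repos(/.*)?', 'all files')
+        value = ('system_u', 'object_r', 'httpd_git_rw_content_t', 's0')
+    i.e. semanage_fcontext_exists() returns the (seuser, serole, setype,
+    serange) tuple unpacked by the callers below.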
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Modify existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if seuser is None: + seuser = orig_seuser + if serange is None: + serange = orig_serange + + if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: + if not module.check_mode: + sefcontext.modify(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) + else: + # Add missing entry + if seuser is None: + seuser = 'system_u' + if serange is None: + serange = 's0' + + if not module.check_mode: + sefcontext.add(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Addition to semanage file context mappings\n' + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) + + +def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''): + ''' Delete SELinux file context mapping definition from the policy. ''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Remove existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if not module.check_mode: + sefcontext.delete(target, ftype) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, **result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + target=dict(type='str', required=True, aliases=['path']), + ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()), + setype=dict(type='str', required=True), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange']), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + if not HAVE_SELINUX or not HAVE_SEOBJECT and not has_respawned(): + system_interpreters = [ + '/usr/libexec/platform-python', + '/usr/bin/python3', + '/usr/bin/python2', + ] + # policycoreutils-python depends on libselinux-python + interpreter = probe_interpreters_for_module(system_interpreters, 'seobject') + if interpreter: + respawn_module(interpreter) + + if not HAVE_SELINUX or not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python(3)"), 
exception=SELINUX_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + target = module.params['target'] + ftype = module.params['ftype'] + setype = module.params['setype'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = dict(target=target, ftype=ftype, setype=setype, state=state) + + if state == 'present': + semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser) + elif state == 'absent': + semanage_fcontext_delete(module, result, target, ftype, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/timezone.py b/test/support/integration/plugins/modules/timezone.py new file mode 100644 index 0000000..b7439a1 --- /dev/null +++ b/test/support/integration/plugins/modules/timezone.py @@ -0,0 +1,909 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Shinichi TAMURA (@tmshn) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: timezone +short_description: Configure timezone setting +description: + - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up the NTP, use M(service) module. + - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time. + - Several different tools are used depending on the OS/Distribution involved. + For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock). + On SmartOS, C(sm-set-timezone), for macOS, C(systemsetup), for BSD, C(/etc/localtime) is modified. + On AIX, C(chtz) is used. + - As of Ansible 2.3 support was added for SmartOS and BSDs. + - As of Ansible 2.4 support was added for macOS. + - As of Ansible 2.9 support was added for AIX 6.1+ + - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails. +version_added: "2.2" +options: + name: + description: + - Name of the timezone for the system clock. + - Default is to keep current setting. + - B(At least one of name and hwclock are required.) + type: str + hwclock: + description: + - Whether the hardware clock is in UTC or in local timezone. + - Default is to keep current setting. + - Note that this option is recommended not to change and may fail + to configure, especially on virtual environments such as AWS. + - B(At least one of name and hwclock are required.) + - I(Only used on Linux.) + type: str + aliases: [ rtc ] + choices: [ local, UTC ] +notes: + - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone + - On AIX only Olson/tz database timezones are useable (POSIX is not supported). + - An OS reboot is also required on AIX for the new timezone setting to take effect. 
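+# A hedged usage sketch (comment only): setting the zone name and the RTC mode
+# in a single task; the C(hwclock) value assumes a Linux host where touching
+# the RTC configuration is safe (see the note above about virtual environments).
+#
+#   - timezone:
+#       name: Europe/Berlin
+#       hwclock: UTC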
+author: + - Shinichi TAMURA (@tmshn) + - Jasper Lievisse Adriaanse (@jasperla) + - Indrajit Raychaudhuri (@indrajitr) +''' + +RETURN = r''' +diff: + description: The differences about the given arguments. + returned: success + type: complex + contains: + before: + description: The values before change + type: dict + after: + description: The values after change + type: dict +''' + +EXAMPLES = r''' +- name: Set timezone to Asia/Tokyo + timezone: + name: Asia/Tokyo +''' + +import errno +import os +import platform +import random +import re +import string +import filecmp + +from ansible.module_utils.basic import AnsibleModule, get_distribution +from ansible.module_utils.six import iteritems + + +class Timezone(object): + """This is a generic Timezone manipulation class that is subclassed based on platform. + + A subclass may wish to override the following action methods: + - get(key, phase) ... get the value from the system at `phase` + - set(key, value) ... set the value to the current system + """ + + def __new__(cls, module): + """Return the platform-specific subclass. + + It does not use load_platform_subclass() because it needs to judge based + on whether the `timedatectl` command exists and is available. + + Args: + module: The AnsibleModule. + """ + if platform.system() == 'Linux': + timedatectl = module.get_bin_path('timedatectl') + if timedatectl is not None: + rc, stdout, stderr = module.run_command(timedatectl) + if rc == 0: + return super(Timezone, SystemdTimezone).__new__(SystemdTimezone) + else: + module.warn('timedatectl command was found but not usable: %s. using other method.' % stderr) + return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) + else: + return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) + elif re.match('^joyent_.*Z', platform.version()): + # platform.system() returns SunOS, which is too broad. So look at the + # platform version instead. However we have to ensure that we're not + # running in the global zone where changing the timezone has no effect. + zonename_cmd = module.get_bin_path('zonename') + if zonename_cmd is not None: + (rc, stdout, _) = module.run_command(zonename_cmd) + if rc == 0 and stdout.strip() == 'global': + module.fail_json(msg='Adjusting timezone is not supported in Global Zone') + + return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone) + elif platform.system() == 'Darwin': + return super(Timezone, DarwinTimezone).__new__(DarwinTimezone) + elif re.match('^(Free|Net|Open)BSD', platform.platform()): + return super(Timezone, BSDTimezone).__new__(BSDTimezone) + elif platform.system() == 'AIX': + AIXoslevel = int(platform.version() + platform.release()) + if AIXoslevel >= 61: + return super(Timezone, AIXTimezone).__new__(AIXTimezone) + else: + module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel) + else: + # Not supported yet + return super(Timezone, Timezone).__new__(Timezone) + + def __init__(self, module): + """Initialize of the class. + + Args: + module: The AnsibleModule. + """ + super(Timezone, self).__init__() + self.msg = [] + # `self.value` holds the values for each params on each phases. + # Initially there's only info of "planned" phase, but the + # `self.check()` function will fill out it. + self.value = dict() + for key in module.argument_spec: + value = module.params[key] + if value is not None: + self.value[key] = dict(planned=value) + self.module = module + + def abort(self, msg): + """Abort the process with error message. 
+ + This is just the wrapper of module.fail_json(). + + Args: + msg: The error message. + """ + error_msg = ['Error message:', msg] + if len(self.msg) > 0: + error_msg.append('Other message(s):') + error_msg.extend(self.msg) + self.module.fail_json(msg='\n'.join(error_msg)) + + def execute(self, *commands, **kwargs): + """Execute the shell command. + + This is just the wrapper of module.run_command(). + + Args: + *commands: The command to execute. + It will be concatenated with single space. + **kwargs: Only 'log' key is checked. + If kwargs['log'] is true, record the command to self.msg. + + Returns: + stdout: Standard output of the command. + """ + command = ' '.join(commands) + (rc, stdout, stderr) = self.module.run_command(command, check_rc=True) + if kwargs.get('log', False): + self.msg.append('executed `%s`' % command) + return stdout + + def diff(self, phase1='before', phase2='after'): + """Calculate the difference between given 2 phases. + + Args: + phase1, phase2: The names of phase to compare. + + Returns: + diff: The difference of value between phase1 and phase2. + This is in the format which can be used with the + `--diff` option of ansible-playbook. + """ + diff = {phase1: {}, phase2: {}} + for key, value in iteritems(self.value): + diff[phase1][key] = value[phase1] + diff[phase2][key] = value[phase2] + return diff + + def check(self, phase): + """Check the state in given phase and set it to `self.value`. + + Args: + phase: The name of the phase to check. + + Returns: + NO RETURN VALUE + """ + if phase == 'planned': + return + for key, value in iteritems(self.value): + value[phase] = self.get(key, phase) + + def change(self): + """Make the changes effect based on `self.value`.""" + for key, value in iteritems(self.value): + if value['before'] != value['planned']: + self.set(key, value['planned']) + + # =========================================== + # Platform specific methods (must be replaced by subclass). + + def get(self, key, phase): + """Get the value for the key at the given phase. + + Called from self.check(). + + Args: + key: The key to get the value + phase: The phase to get the value + + Return: + value: The value for the key at the given phase. + """ + self.abort('get(key, phase) is not implemented on target platform') + + def set(self, key, value): + """Set the value for the key (of course, for the phase 'after'). + + Called from self.change(). + + Args: + key: Key to set the value + value: Value to set + """ + self.abort('set(key, value) is not implemented on target platform') + + def _verify_timezone(self): + tz = self.value['name']['planned'] + tzfile = '/usr/share/zoneinfo/%s' % tz + if not os.path.isfile(tzfile): + self.abort('given timezone "%s" is not available' % tz) + return tzfile + + +class SystemdTimezone(Timezone): + """This is a Timezone manipulation class for systemd-powered Linux. + + It uses the `timedatectl` command to check/set all arguments. 
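+
+    Parsing sketch (the `timedatectl status` lines are illustrative; exact
+    wording differs between systemd releases, which is why the regexps below
+    are kept loose):
+        Time zone: Asia/Tokyo (JST, +0900)   ->  name    = 'Asia/Tokyo'
+        RTC in local TZ: no                  ->  hwclock = 'UTC'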
+ """ + + regexps = dict( + hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE), + name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE) + ) + + subcmds = dict( + hwclock='set-local-rtc', + name='set-timezone' + ) + + def __init__(self, module): + super(SystemdTimezone, self).__init__(module) + self.timedatectl = module.get_bin_path('timedatectl', required=True) + self.status = dict() + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + + def _get_status(self, phase): + if phase not in self.status: + self.status[phase] = self.execute(self.timedatectl, 'status') + return self.status[phase] + + def get(self, key, phase): + status = self._get_status(phase) + value = self.regexps[key].search(status).group(1) + if key == 'hwclock': + # For key='hwclock'; convert yes/no -> local/UTC + if self.module.boolean(value): + value = 'local' + else: + value = 'UTC' + return value + + def set(self, key, value): + # For key='hwclock'; convert UTC/local -> yes/no + if key == 'hwclock': + if value == 'local': + value = 'yes' + else: + value = 'no' + self.execute(self.timedatectl, self.subcmds[key], value, log=True) + + +class NosystemdTimezone(Timezone): + """This is a Timezone manipulation class for non systemd-powered Linux. + + For timezone setting, it edits the following file and reflect changes: + - /etc/sysconfig/clock ... RHEL/CentOS + - /etc/timezone ... Debian/Ubuntu + For hwclock setting, it executes `hwclock --systohc` command with the + '--utc' or '--localtime' option. + """ + + conf_files = dict( + name=None, # To be set in __init__ + hwclock=None, # To be set in __init__ + adjtime='/etc/adjtime' + ) + + # It's fine if all tree config files don't exist + allow_no_file = dict( + name=True, + hwclock=True, + adjtime=True + ) + + regexps = dict( + name=None, # To be set in __init__ + hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE), + adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE) + ) + + dist_regexps = dict( + SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE), + redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE) + ) + + dist_tzline_format = dict( + SuSE='TIMEZONE="%s"\n', + redhat='ZONE="%s"\n' + ) + + def __init__(self, module): + super(NosystemdTimezone, self).__init__(module) + # Validate given timezone + if 'name' in self.value: + tzfile = self._verify_timezone() + # `--remove-destination` is needed if /etc/localtime is a symlink so + # that it overwrites it instead of following it. + self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)] + self.update_hwclock = self.module.get_bin_path('hwclock', required=True) + # Distribution-specific configurations + if self.module.get_bin_path('dpkg-reconfigure') is not None: + # Debian/Ubuntu + if 'name' in self.value: + self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile), + '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)] + self.conf_files['name'] = '/etc/timezone' + self.conf_files['hwclock'] = '/etc/default/rcS' + self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE) + self.tzline_format = '%s\n' + else: + # RHEL/CentOS/SUSE + if self.module.get_bin_path('tzdata-update') is not None: + # tzdata-update cannot update the timezone if /etc/localtime is + # a symlink so we have to use cp to update the time zone which + # was set above. 
+ if not os.path.islink('/etc/localtime'): + self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)] + # else: + # self.update_timezone = 'cp --remove-destination ...' <- configured above + self.conf_files['name'] = '/etc/sysconfig/clock' + self.conf_files['hwclock'] = '/etc/sysconfig/clock' + try: + f = open(self.conf_files['name'], 'r') + except IOError as err: + if self._allow_ioerror(err, 'name'): + # If the config file doesn't exist detect the distribution and set regexps. + distribution = get_distribution() + if distribution == 'SuSE': + # For SUSE + self.regexps['name'] = self.dist_regexps['SuSE'] + self.tzline_format = self.dist_tzline_format['SuSE'] + else: + # For RHEL/CentOS + self.regexps['name'] = self.dist_regexps['redhat'] + self.tzline_format = self.dist_tzline_format['redhat'] + else: + self.abort('could not read configuration file "%s"' % self.conf_files['name']) + else: + # The key for timezone might be `ZONE` or `TIMEZONE` + # (the former is used in RHEL/CentOS and the latter is used in SUSE linux). + # So check the content of /etc/sysconfig/clock and decide which key to use. + sysconfig_clock = f.read() + f.close() + if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE): + # For SUSE + self.regexps['name'] = self.dist_regexps['SuSE'] + self.tzline_format = self.dist_tzline_format['SuSE'] + else: + # For RHEL/CentOS + self.regexps['name'] = self.dist_regexps['redhat'] + self.tzline_format = self.dist_tzline_format['redhat'] + + def _allow_ioerror(self, err, key): + # In some cases, even if the target file does not exist, + # simply creating it may solve the problem. + # In such cases, we should continue the configuration rather than aborting. + if err.errno != errno.ENOENT: + # If the error is not ENOENT ("No such file or directory"), + # (e.g., permission error, etc), we should abort. + return False + return self.allow_no_file.get(key, False) + + def _edit_file(self, filename, regexp, value, key): + """Replace the first matched line with given `value`. + + If `regexp` matched more than once, other than the first line will be deleted. + + Args: + filename: The name of the file to edit. + regexp: The regular expression to search with. + value: The line which will be inserted. + key: For what key the file is being editted. 
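+
+        Behaviour sketch (assumed example, editing /etc/sysconfig/clock
+        with the redhat regexp):
+            before: ['ZONE="US/Eastern"\n', 'UTC=true\n']
+            after:  ['ZONE="Asia/Tokyo"\n', 'UTC=true\n']
+        The new value lands at the index of the first match; every other
+        matching line is dropped.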
+ """ + # Read the file + try: + file = open(filename, 'r') + except IOError as err: + if self._allow_ioerror(err, key): + lines = [] + else: + self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename)) + else: + lines = file.readlines() + file.close() + # Find the all matched lines + matched_indices = [] + for i, line in enumerate(lines): + if regexp.search(line): + matched_indices.append(i) + if len(matched_indices) > 0: + insert_line = matched_indices[0] + else: + insert_line = 0 + # Remove all matched lines + for i in matched_indices[::-1]: + del lines[i] + # ...and insert the value + lines.insert(insert_line, value) + # Write the changes + try: + file = open(filename, 'w') + except IOError: + self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename)) + else: + file.writelines(lines) + file.close() + self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename)) + + def _get_value_from_config(self, key, phase): + filename = self.conf_files[key] + try: + file = open(filename, mode='r') + except IOError as err: + if self._allow_ioerror(err, key): + if key == 'hwclock': + return 'n/a' + elif key == 'adjtime': + return 'UTC' + elif key == 'name': + return 'n/a' + else: + self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename)) + else: + status = file.read() + file.close() + try: + value = self.regexps[key].search(status).group(1) + except AttributeError: + if key == 'hwclock': + # If we cannot find UTC in the config that's fine. + return 'n/a' + elif key == 'adjtime': + # If we cannot find UTC/LOCAL in /etc/cannot that means UTC + # will be used by default. + return 'UTC' + elif key == 'name': + if phase == 'before': + # In 'before' phase UTC/LOCAL doesn't need to be set in + # the timezone config file, so we ignore this error. + return 'n/a' + else: + self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename)) + else: + if key == 'hwclock': + # convert yes/no -> UTC/local + if self.module.boolean(value): + value = 'UTC' + else: + value = 'local' + elif key == 'adjtime': + # convert LOCAL -> local + if value != 'UTC': + value = value.lower() + return value + + def get(self, key, phase): + planned = self.value[key]['planned'] + if key == 'hwclock': + value = self._get_value_from_config(key, phase) + if value == planned: + # If the value in the config file is the same as the 'planned' + # value, we need to check /etc/adjtime. + value = self._get_value_from_config('adjtime', phase) + elif key == 'name': + value = self._get_value_from_config(key, phase) + if value == planned: + # If the planned values is the same as the one in the config file + # we need to check if /etc/localtime is also set to the 'planned' zone. + if os.path.islink('/etc/localtime'): + # If /etc/localtime is a symlink and is not set to the TZ we 'planned' + # to set, we need to return the TZ which the symlink points to. + if os.path.exists('/etc/localtime'): + # We use readlink() because on some distros zone files are symlinks + # to other zone files, so it's hard to get which TZ is actually set + # if we follow the symlink. + path = os.readlink('/etc/localtime') + linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE) + if linktz: + valuelink = linktz.group(1) + if valuelink != planned: + value = valuelink + else: + # Set current TZ to 'n/a' if the symlink points to a path + # which isn't a zone file. 
+ value = 'n/a' + else: + # Set current TZ to 'n/a' if the symlink to the zone file is broken. + value = 'n/a' + else: + # If /etc/localtime is not a symlink best we can do is compare it with + # the 'planned' zone info file and return 'n/a' if they are different. + try: + if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned): + return 'n/a' + except Exception: + return 'n/a' + else: + self.abort('unknown parameter "%s"' % key) + return value + + def set_timezone(self, value): + self._edit_file(filename=self.conf_files['name'], + regexp=self.regexps['name'], + value=self.tzline_format % value, + key='name') + for cmd in self.update_timezone: + self.execute(cmd) + + def set_hwclock(self, value): + if value == 'local': + option = '--localtime' + utc = 'no' + else: + option = '--utc' + utc = 'yes' + if self.conf_files['hwclock'] is not None: + self._edit_file(filename=self.conf_files['hwclock'], + regexp=self.regexps['hwclock'], + value='UTC=%s\n' % utc, + key='hwclock') + self.execute(self.update_hwclock, '--systohc', option, log=True) + + def set(self, key, value): + if key == 'name': + self.set_timezone(value) + elif key == 'hwclock': + self.set_hwclock(value) + else: + self.abort('unknown parameter "%s"' % key) + + +class SmartOSTimezone(Timezone): + """This is a Timezone manipulation class for SmartOS instances. + + It uses the C(sm-set-timezone) utility to set the timezone, and + inspects C(/etc/default/init) to determine the current timezone. + + NB: A zone needs to be rebooted in order for the change to be + activated. + """ + + def __init__(self, module): + super(SmartOSTimezone, self).__init__(module) + self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False) + if not self.settimezone: + module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.') + + def get(self, key, phase): + """Lookup the current timezone name in `/etc/default/init`. If anything else + is requested, or if the TZ field is not set we fail. + """ + if key == 'name': + try: + f = open('/etc/default/init', 'r') + for line in f: + m = re.match('^TZ=(.*)$', line.strip()) + if m: + return m.groups()[0] + except Exception: + self.module.fail_json(msg='Failed to read /etc/default/init') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + """Set the requested timezone through sm-set-timezone, an invalid timezone name + will be rejected and we have no further input validation to perform. + """ + if key == 'name': + cmd = 'sm-set-timezone %s' % value + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg=stderr) + + # sm-set-timezone knows no state and will always set the timezone. + # XXX: https://github.com/joyent/smtools/pull/2 + m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1]) + if not (m and m.groups()[-1] == value): + self.module.fail_json(msg='Failed to set timezone') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class DarwinTimezone(Timezone): + """This is the timezone implementation for Darwin which, unlike other *BSD + implementations, uses the `systemsetup` command on Darwin to check/set + the timezone. 
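+
+    Command sketch (illustrative): `systemsetup -gettimezone` prints a line
+    such as 'Time Zone: America/Los_Angeles', which the regexp below captures,
+    and `systemsetup -listtimezones` supplies the names used for validation.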
+ """ + + regexps = dict( + name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE) + ) + + def __init__(self, module): + super(DarwinTimezone, self).__init__(module) + self.systemsetup = module.get_bin_path('systemsetup', required=True) + self.status = dict() + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + + def _get_current_timezone(self, phase): + """Lookup the current timezone via `systemsetup -gettimezone`.""" + if phase not in self.status: + self.status[phase] = self.execute(self.systemsetup, '-gettimezone') + return self.status[phase] + + def _verify_timezone(self): + tz = self.value['name']['planned'] + # Lookup the list of supported timezones via `systemsetup -listtimezones`. + # Note: Skip the first line that contains the label 'Time Zones:' + out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:] + tz_list = list(map(lambda x: x.strip(), out)) + if tz not in tz_list: + self.abort('given timezone "%s" is not available' % tz) + return tz + + def get(self, key, phase): + if key == 'name': + status = self._get_current_timezone(phase) + value = self.regexps[key].search(status).group(1) + return value + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + if key == 'name': + self.execute(self.systemsetup, '-settimezone', value, log=True) + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class BSDTimezone(Timezone): + """This is the timezone implementation for *BSD which works simply through + updating the `/etc/localtime` symlink to point to a valid timezone name under + `/usr/share/zoneinfo`. + """ + + def __init__(self, module): + super(BSDTimezone, self).__init__(module) + + def __get_timezone(self): + zoneinfo_dir = '/usr/share/zoneinfo/' + localtime_file = '/etc/localtime' + + # Strategy 1: + # If /etc/localtime does not exist, assum the timezone is UTC. + if not os.path.exists(localtime_file): + self.module.warn('Could not read /etc/localtime. Assuming UTC.') + return 'UTC' + + # Strategy 2: + # Follow symlink of /etc/localtime + zoneinfo_file = localtime_file + while not zoneinfo_file.startswith(zoneinfo_dir): + try: + zoneinfo_file = os.readlink(localtime_file) + except OSError: + # OSError means "end of symlink chain" or broken link. + break + else: + return zoneinfo_file.replace(zoneinfo_dir, '') + + # Strategy 3: + # (If /etc/localtime is not symlinked) + # Check all files in /usr/share/zoneinfo and return first non-link match. + for dname, _, fnames in sorted(os.walk(zoneinfo_dir)): + for fname in sorted(fnames): + zoneinfo_file = os.path.join(dname, fname) + if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file): + return zoneinfo_file.replace(zoneinfo_dir, '') + + # Strategy 4: + # As a fall-back, return 'UTC' as default assumption. + self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.') + return 'UTC' + + def get(self, key, phase): + """Lookup the current timezone by resolving `/etc/localtime`.""" + if key == 'name': + return self.__get_timezone() + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + if key == 'name': + # First determine if the requested timezone is valid by looking in + # the zoneinfo directory. 
+ zonefile = '/usr/share/zoneinfo/' + value + try: + if not os.path.isfile(zonefile): + self.module.fail_json(msg='%s is not a recognized timezone' % value) + except Exception: + self.module.fail_json(msg='Failed to stat %s' % zonefile) + + # Now (somewhat) atomically update the symlink by creating a new + # symlink and move it into place. Otherwise we have to remove the + # original symlink and create the new symlink, however that would + # create a race condition in case another process tries to read + # /etc/localtime between removal and creation. + suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)]) + new_localtime = '/etc/localtime.' + suffix + + try: + os.symlink(zonefile, new_localtime) + os.rename(new_localtime, '/etc/localtime') + except Exception: + os.remove(new_localtime) + self.module.fail_json(msg='Could not update /etc/localtime') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class AIXTimezone(Timezone): + """This is a Timezone manipulation class for AIX instances. + + It uses the C(chtz) utility to set the timezone, and + inspects C(/etc/environment) to determine the current timezone. + + While AIX time zones can be set using two formats (POSIX and + Olson) the prefered method is Olson. + See the following article for more information: + https://developer.ibm.com/articles/au-aix-posix/ + + NB: AIX needs to be rebooted in order for the change to be + activated. + """ + + def __init__(self, module): + super(AIXTimezone, self).__init__(module) + self.settimezone = self.module.get_bin_path('chtz', required=True) + + def __get_timezone(self): + """ Return the current value of TZ= in /etc/environment """ + try: + f = open('/etc/environment', 'r') + etcenvironment = f.read() + f.close() + except Exception: + self.module.fail_json(msg='Issue reading contents of /etc/environment') + + match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE) + if match: + return match.group(1) + else: + return None + + def get(self, key, phase): + """Lookup the current timezone name in `/etc/environment`. If anything else + is requested, or if the TZ field is not set we fail. + """ + if key == 'name': + return self.__get_timezone() + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + """Set the requested timezone through chtz, an invalid timezone name + will be rejected and we have no further input validation to perform. + """ + if key == 'name': + # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values. + # It will only return non-zero if the chtz command itself fails, it does not check for + # valid timezones. We need to perform a basic check to confirm that the timezone + # definition exists in /usr/share/lib/zoneinfo + # This does mean that we can only support Olson for now. The below commented out regex + # detects Olson date formats, so in the future we could detect Posix or Olson and + # act accordingly. + + # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE) + # if not regex_olson.match(value): + # msg = 'Supplied timezone (%s) does not appear to a be valid Olson string' % value + # self.module.fail_json(msg=msg) + + # First determine if the requested timezone is valid by looking in the zoneinfo + # directory. + zonefile = '/usr/share/lib/zoneinfo/' + value + try: + if not os.path.isfile(zonefile): + self.module.fail_json(msg='%s is not a recognized timezone.' 
% value) + except Exception: + self.module.fail_json(msg='Failed to check %s.' % zonefile) + + # Now set the TZ using chtz + cmd = 'chtz %s' % value + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg=stderr) + + # The best condition check we can do is to check the value of TZ after making the + # change. + TZ = self.__get_timezone() + if TZ != value: + msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value) + self.module.fail_json(msg=msg) + + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +def main(): + # Construct 'module' and 'tz' + module = AnsibleModule( + argument_spec=dict( + hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']), + name=dict(type='str'), + ), + required_one_of=[ + ['hwclock', 'name'] + ], + supports_check_mode=True, + ) + tz = Timezone(module) + + # Check the current state + tz.check(phase='before') + if module.check_mode: + diff = tz.diff('before', 'planned') + # In check mode, 'planned' state is treated as 'after' state + diff['after'] = diff.pop('planned') + else: + # Make change + tz.change() + # Check the current state + tz.check(phase='after') + # Examine if the current state matches planned state + (after, planned) = tz.diff('after', 'planned').values() + if after != planned: + tz.abort('still not desired state, though changes have made - ' + 'planned: %s, after: %s' % (str(planned), str(after))) + diff = tz.diff('before', 'after') + + changed = (diff['before'] != diff['after']) + if len(tz.msg) > 0: + module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg)) + else: + module.exit_json(changed=changed, diff=diff) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/zypper.py b/test/support/integration/plugins/modules/zypper.py new file mode 100644 index 0000000..bfb3181 --- /dev/null +++ b/test/support/integration/plugins/modules/zypper.py @@ -0,0 +1,540 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com> +# based on +# openbsd_pkg +# (c) 2013 +# Patrik Lundin <patrik.lundin.swe@gmail.com> +# +# yum +# (c) 2012, Red Hat, Inc +# Written by Seth Vidal <skvidal at fedoraproject.org> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: zypper +author: + - "Patrick Callahan (@dirtyharrycallahan)" + - "Alexander Gubin (@alxgu)" + - "Thomas O'Donnell (@andytom)" + - "Robin Roth (@robinro)" + - "Andrii Radyk (@AnderEnder)" +version_added: "1.2" +short_description: Manage packages on SUSE and openSUSE +description: + - Manage packages on SUSE and openSUSE using the zypper and rpm tools. +options: + name: + description: + - Package name C(name) or package specifier or a list of either. + - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to + update the package within the version range given. + - You can also pass a url or a local path to a rpm file. + - When using state=latest, this can be '*', which updates all installed packages. + required: true + aliases: [ 'pkg' ] + state: + description: + - C(present) will make sure the package is installed. 
+              C(latest) will make sure the latest version of the package is installed.
+              C(absent) will make sure the specified package is not installed.
+              C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+            - When using C(dist-upgrade), I(name) should be C('*').
+        required: false
+        choices: [ present, latest, absent, dist-upgrade ]
+        default: "present"
+    type:
+        description:
+            - The type of package to be operated on.
+        required: false
+        choices: [ package, patch, pattern, product, srcpackage, application ]
+        default: "package"
+        version_added: "2.0"
+    extra_args_precommand:
+        version_added: "2.6"
+        required: false
+        description:
+            - Add additional global target options to C(zypper).
+            - Options should be supplied in a single line as if given in the command line.
+    disable_gpg_check:
+        description:
+            - Whether to disable the GPG signature checking of the package
+              being installed. Has an effect only if state is
+              I(present) or I(latest).
+        required: false
+        default: "no"
+        type: bool
+    disable_recommends:
+        version_added: "1.8"
+        description:
+            - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) passes C(--no-recommends), so recommended
+              packages are not installed; C(no) lets zypper install them.
+        required: false
+        default: "yes"
+        type: bool
+    force:
+        version_added: "2.2"
+        description:
+            - Adds the C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+        required: false
+        default: "no"
+        type: bool
+    force_resolution:
+        version_added: "2.10"
+        description:
+            - Adds the C(--force-resolution) option to I(zypper). Allows (un)installing packages with conflicting requirements (the resolver will choose a solution).
+        required: false
+        default: "no"
+        type: bool
+    update_cache:
+        version_added: "2.2"
+        description:
+            - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+        required: false
+        default: "no"
+        type: bool
+        aliases: [ "refresh" ]
+    oldpackage:
+        version_added: "2.2"
+        description:
+            - Adds the C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than C(force). This is implied as soon as a
+              version is specified as part of the package name.
+        required: false
+        default: "no"
+        type: bool
+    extra_args:
+        version_added: "2.4"
+        required: false
+        description:
+            - Add additional options to the C(zypper) command.
+            - Options should be supplied in a single line as if given in the command line.
+notes:
+    - When used with a C(loop:) each package will be processed individually;
+      it is much more efficient to pass the list directly to the I(name) option.
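+# A hedged sketch (comment): one transaction covering several packages, e.g.
+#
+#   - zypper:
+#       name: [git, curl, vim]
+#       state: present
+#
+# rather than looping the same task over the three names one at a time.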
+# informational: requirements for nodes +requirements: + - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" + - python-xml + - rpm +''' + +EXAMPLES = ''' +# Install "nmap" +- zypper: + name: nmap + state: present + +# Install apache2 with recommended packages +- zypper: + name: apache2 + state: present + disable_recommends: no + +# Apply a given patch +- zypper: + name: openSUSE-2016-128 + state: present + type: patch + +# Remove the "nmap" package +- zypper: + name: nmap + state: absent + +# Install the nginx rpm from a remote repo +- zypper: + name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm' + state: present + +# Install local rpm file +- zypper: + name: /tmp/fancy-software.rpm + state: present + +# Update all packages +- zypper: + name: '*' + state: latest + +# Apply all available patches +- zypper: + name: '*' + state: latest + type: patch + +# Perform a dist-upgrade with additional arguments +- zypper: + name: '*' + state: dist-upgrade + extra_args: '--no-allow-vendor-change --allow-arch-change' + +# Refresh repositories and update package "openssl" +- zypper: + name: openssl + state: present + update_cache: yes + +# Install specific version (possible comparisons: <, >, <=, >=, =) +- zypper: + name: 'docker>=1.10' + state: present + +# Wait 20 seconds to acquire the lock before failing +- zypper: + name: mosh + state: present + environment: + ZYPP_LOCK_TIMEOUT: 20 +''' + +import xml +import re +from xml.dom.minidom import parseString as parseXML +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + + +class Package: + def __init__(self, name, prefix, version): + self.name = name + self.prefix = prefix + self.version = version + self.shouldinstall = (prefix == '+') + + def __str__(self): + return self.prefix + self.name + self.version + + +def split_name_version(name): + """splits of the package name and desired version + + example formats: + - docker>=1.10 + - apache=2.4 + + Allowed version specifiers: <, >, <=, >=, = + Allowed version format: [0-9.-]* + + Also allows a prefix indicating remove "-", "~" or install "+" + """ + + prefix = '' + if name[0] in ['-', '~', '+']: + prefix = name[0] + name = name[1:] + if prefix == '~': + prefix = '-' + + version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$') + try: + reres = version_check.match(name) + name, version = reres.groups() + if version is None: + version = '' + return prefix, name, version + except Exception: + return prefix, name, '' + + +def get_want_state(names, remove=False): + packages = [] + urls = [] + for name in names: + if '://' in name or name.endswith('.rpm'): + urls.append(name) + else: + prefix, pname, version = split_name_version(name) + if prefix not in ['-', '+']: + if remove: + prefix = '-' + else: + prefix = '+' + packages.append(Package(pname, prefix, version)) + return packages, urls + + +def get_installed_state(m, packages): + "get installed state of packages" + + cmd = get_cmd(m, 'search') + cmd.extend(['--match-exact', '--details', '--installed-only']) + cmd.extend([p.name for p in packages]) + return parse_zypper_xml(m, cmd, fail_not_found=False)[0] + + +def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): + rc, stdout, stderr = m.run_command(cmd, check_rc=False) + + try: + dom = parseXML(stdout) + except xml.parsers.expat.ExpatError as exc: + m.fail_json(msg="Failed to 
parse zypper xml output: %s" % to_native(exc), + rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + + if rc == 104: + # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found) + if fail_not_found: + errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data + m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + else: + return {}, rc, stdout, stderr + elif rc in [0, 106, 103]: + # zypper exit codes + # 0: success + # 106: signature verification failed + # 103: zypper was upgraded, run same command again + if packages is None: + firstrun = True + packages = {} + solvable_list = dom.getElementsByTagName('solvable') + for solvable in solvable_list: + name = solvable.getAttribute('name') + packages[name] = {} + packages[name]['version'] = solvable.getAttribute('edition') + packages[name]['oldversion'] = solvable.getAttribute('edition-old') + status = solvable.getAttribute('status') + packages[name]['installed'] = status == "installed" + packages[name]['group'] = solvable.parentNode.nodeName + if rc == 103 and firstrun: + # if this was the first run and it failed with 103 + # run zypper again with the same command to complete update + return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) + + return packages, rc, stdout, stderr + m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + + +def get_cmd(m, subcommand): + "puts together the basic zypper command arguments with those passed to the module" + is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] + is_refresh = subcommand == 'refresh' + cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout'] + if m.params['extra_args_precommand']: + args_list = m.params['extra_args_precommand'].split() + cmd.extend(args_list) + # add global options before zypper command + if (is_install or is_refresh) and m.params['disable_gpg_check']: + cmd.append('--no-gpg-checks') + + if subcommand == 'search': + cmd.append('--disable-repositories') + + cmd.append(subcommand) + if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh: + cmd.extend(['--type', m.params['type']]) + if m.check_mode and subcommand != 'search': + cmd.append('--dry-run') + if is_install: + cmd.append('--auto-agree-with-licenses') + if m.params['disable_recommends']: + cmd.append('--no-recommends') + if m.params['force']: + cmd.append('--force') + if m.params['force_resolution']: + cmd.append('--force-resolution') + if m.params['oldpackage']: + cmd.append('--oldpackage') + if m.params['extra_args']: + args_list = m.params['extra_args'].split(' ') + cmd.extend(args_list) + + return cmd + + +def set_diff(m, retvals, result): + # TODO: if there is only one package, set before/after to version numbers + packages = {'installed': [], 'removed': [], 'upgraded': []} + if result: + for p in result: + group = result[p]['group'] + if group == 'to-upgrade': + versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')' + packages['upgraded'].append(p + versions) + elif group == 'to-install': + packages['installed'].append(p) + elif group == 'to-remove': + packages['removed'].append(p) + + output = '' + for state in packages: + if packages[state]: + output += state + ': ' + ', '.join(packages[state]) + '\n' + if 'diff' not in retvals: + retvals['diff'] = {} + if 'prepared' not in retvals['diff']: + retvals['diff']['prepared'] = output + else: + retvals['diff']['prepared'] += '\n' + output + + +def package_present(m, name, 
want_latest): + "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + packages, urls = get_want_state(name) + + # add oldpackage flag when a version is given to allow downgrades + if any(p.version for p in packages): + m.params['oldpackage'] = True + + if not want_latest: + # for state=present: filter out already installed packages + # if a version is given leave the package in to let zypper handle the version + # resolution + packageswithoutversion = [p for p in packages if not p.version] + prerun_state = get_installed_state(m, packageswithoutversion) + # generate lists of packages to install or remove + packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)] + + if not packages and not urls: + # nothing to install/remove and nothing to update + return None, retvals + + # zypper install also updates packages + cmd = get_cmd(m, 'install') + cmd.append('--') + cmd.extend(urls) + # pass packages to zypper + # allow for + or - prefixes in install/remove lists + # also add version specifier if given + # do this in one zypper run to allow for dependency-resolution + # for example "-exim postfix" runs without removing packages depending on mailserver + cmd.extend([str(p) for p in packages]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return result, retvals + + +def package_update_all(m): + "run update or patch on all available packages" + + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + if m.params['type'] == 'patch': + cmdname = 'patch' + elif m.params['state'] == 'dist-upgrade': + cmdname = 'dist-upgrade' + else: + cmdname = 'update' + + cmd = get_cmd(m, cmdname) + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def package_absent(m, name): + "remove the packages in name" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + # Get package state + packages, urls = get_want_state(name, remove=True) + if any(p.prefix == '+' for p in packages): + m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") + if urls: + m.fail_json(msg="Can not remove via URL.") + if m.params['type'] == 'patch': + m.fail_json(msg="Can not remove patches.") + prerun_state = get_installed_state(m, packages) + packages = [p for p in packages if p.name in prerun_state] + + if not packages: + return None, retvals + + cmd = get_cmd(m, 'remove') + cmd.extend([p.name + p.version for p in packages]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def repo_refresh(m): + "update the repositories" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + + cmd = get_cmd(m, 'refresh') + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return retvals + +# =========================================== +# Main control flow + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['pkg'], type='list'), + state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), + type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), + extra_args_precommand=dict(required=False, default=None), + 
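+            # (illustrative: extra_args_precommand='--gpg-auto-import-keys'
+            #  would be placed by get_cmd() before the zypper subcommand)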
+            disable_gpg_check=dict(required=False, default='no', type='bool'),
+            disable_recommends=dict(required=False, default='yes', type='bool'),
+            force=dict(required=False, default='no', type='bool'),
+            force_resolution=dict(required=False, default='no', type='bool'),
+            update_cache=dict(required=False, aliases=['refresh'], default='no', type='bool'),
+            oldpackage=dict(required=False, default='no', type='bool'),
+            extra_args=dict(required=False, default=None),
+        ),
+        supports_check_mode=True
+    )
+
+    name = module.params['name']
+    state = module.params['state']
+    update_cache = module.params['update_cache']
+
+    # remove empty strings from package list
+    name = list(filter(None, name))
+
+    # Refresh repositories
+    if update_cache and not module.check_mode:
+        retvals = repo_refresh(module)
+
+        if retvals['rc'] != 0:
+            module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+    # Perform requested action
+    if name == ['*'] and state in ['latest', 'dist-upgrade']:
+        packages_changed, retvals = package_update_all(module)
+    elif name != ['*'] and state == 'dist-upgrade':
+        module.fail_json(msg="Can not dist-upgrade specific packages.")
+    else:
+        if state in ['absent', 'removed']:
+            packages_changed, retvals = package_absent(module, name)
+        elif state in ['installed', 'present', 'latest']:
+            packages_changed, retvals = package_present(module, name, state == 'latest')
+
+    retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+
+    if module._diff:
+        set_diff(module, retvals, packages_changed)
+
+    if retvals['rc'] != 0:
+        module.fail_json(msg="Zypper run failed.", **retvals)
+
+    if not retvals['changed']:
+        del retvals['stdout']
+        del retvals['stderr']
+
+    module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+
+if __name__ == "__main__":
+    main()
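The name/version split performed by split_name_version() above is easiest to
see in isolation. Below is a minimal, self-contained sketch of the same logic
(the function is restated for illustration; the sample inputs and expected
tuples are assumptions, not output captured from the module):

import re

# Same pattern as split_name_version(): lazy package name, then an optional
# comparison operator followed by a numeric/dotted version.
VERSION_CHECK = re.compile(r'^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')

def split_name_version(name):
    # A leading '-', '~' or '+' is a remove/install prefix; '~' normalizes to '-'.
    # Assumes a non-empty name, which main() guarantees by filtering empty strings.
    prefix = ''
    if name[0] in ('-', '~', '+'):
        prefix = '-' if name[0] == '~' else name[0]
        name = name[1:]
    base, version = VERSION_CHECK.match(name).groups()
    return prefix, base, version or ''

# Illustrative inputs and the splits they produce:
assert split_name_version('docker>=1.10') == ('', 'docker', '>=1.10')
assert split_name_version('~nmap') == ('-', 'nmap', '')
assert split_name_version('+apache=2.4') == ('+', 'apache', '=2.4')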