path: root/test/support/integration/plugins/module_utils
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
commit     a453ac31f3428614cceb99027f8efbdb9258a40b (patch)
tree       f61f87408f32a8511cbd91799f9cececb53e0374 /test/support/integration/plugins/module_utils
parent     Initial commit. (diff)
download   ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.tar.xz
           ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.zip

Adding upstream version 2.10.7+merged+base+2.10.8+dfsg. (tag: upstream/2.10.7+merged+base+2.10.8+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'test/support/integration/plugins/module_utils')
-rw-r--r--  test/support/integration/plugins/module_utils/aws/__init__.py  0
-rw-r--r--  test/support/integration/plugins/module_utils/aws/core.py  335
-rw-r--r--  test/support/integration/plugins/module_utils/aws/iam.py  49
-rw-r--r--  test/support/integration/plugins/module_utils/aws/s3.py  50
-rw-r--r--  test/support/integration/plugins/module_utils/aws/waiters.py  405
-rw-r--r--  test/support/integration/plugins/module_utils/azure_rm_common.py  1473
-rw-r--r--  test/support/integration/plugins/module_utils/azure_rm_common_rest.py  97
-rw-r--r--  test/support/integration/plugins/module_utils/cloud.py  217
-rw-r--r--  test/support/integration/plugins/module_utils/compat/__init__.py  0
-rw-r--r--  test/support/integration/plugins/module_utils/compat/ipaddress.py  2476
-rw-r--r--  test/support/integration/plugins/module_utils/crypto.py  2125
-rw-r--r--  test/support/integration/plugins/module_utils/database.py  142
-rw-r--r--  test/support/integration/plugins/module_utils/docker/__init__.py  0
-rw-r--r--  test/support/integration/plugins/module_utils/docker/common.py  1022
-rw-r--r--  test/support/integration/plugins/module_utils/docker/swarm.py  280
-rw-r--r--  test/support/integration/plugins/module_utils/ec2.py  758
-rw-r--r--  test/support/integration/plugins/module_utils/ecs/__init__.py  0
-rw-r--r--  test/support/integration/plugins/module_utils/ecs/api.py  364
-rw-r--r--  test/support/integration/plugins/module_utils/mysql.py  106
-rw-r--r--  test/support/integration/plugins/module_utils/net_tools/__init__.py  0
-rw-r--r--  test/support/integration/plugins/module_utils/network/__init__.py  0
-rw-r--r--  test/support/integration/plugins/module_utils/network/common/__init__.py  0
-rw-r--r--  test/support/integration/plugins/module_utils/network/common/utils.py  643
-rw-r--r--  test/support/integration/plugins/module_utils/postgres.py  330
-rw-r--r--  test/support/integration/plugins/module_utils/rabbitmq.py  220
25 files changed, 11092 insertions, 0 deletions
diff --git a/test/support/integration/plugins/module_utils/aws/__init__.py b/test/support/integration/plugins/module_utils/aws/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/__init__.py
diff --git a/test/support/integration/plugins/module_utils/aws/core.py b/test/support/integration/plugins/module_utils/aws/core.py
new file mode 100644
index 00000000..c4527b6d
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/core.py
@@ -0,0 +1,335 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module adds shared support for generic Amazon AWS modules
+
+**This code is not yet ready for use in user modules. As of 2017 and
+through to 2018, the interface is likely to change aggressively as the
+exact correct interface for Ansible AWS modules is identified. In
+particular, until this notice goes away or is changed, methods may
+disappear from the interface. Please don't publish modules using this
+except directly to the main Ansible development repository.**
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+ from ansible.module_utils.aws import AnsibleAWSModule
+    module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
+                              mutually_exclusive=list1, required_together=list2)
+
+The 'AnsibleAWSModule' module provides similar, but more restricted,
+interfaces to the normal Ansible module. It also includes additional
+methods for connecting to AWS using the standard module arguments
+
+ m.resource('lambda') # - get an AWS connection as a boto3 resource.
+
+or
+
+ m.client('sts') # - get an AWS connection as a boto3 client.
+
+To make AWSRetry easier to use, it can now be wrapped around any call from a
+module-created client. To add retries to a client, pass a retry decorator
+when creating it:
+
+ m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can then opt in to the retry decorator at call time
+using the `aws_retry` argument. By default, no retries are used.
+
+ ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import logging
+import traceback
+from functools import wraps
+from distutils.version import LooseVersion
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ # Python 3
+ from io import StringIO
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn
+from ansible.module_utils.ec2 import get_aws_connection_info, get_aws_region
+
+# We will also export HAS_BOTO3 so end user modules can use it.
+__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code')
+
+
+class AnsibleAWSModule(object):
+ """An ansible module class for AWS modules
+
+ AnsibleAWSModule provides an a class for building modules which
+ connect to Amazon Web Services. The interface is currently more
+ restricted than the basic module class with the aim that later the
+ basic module class can be reduced. If you find that any key
+ feature is missing please contact the author/Ansible AWS team
+ (available on #ansible-aws on IRC) to request the additional
+ features needed.
+ """
+ default_settings = {
+ "default_args": True,
+ "check_boto3": True,
+ "auto_retry": True,
+ "module_class": AnsibleModule
+ }
+
+ def __init__(self, **kwargs):
+ local_settings = {}
+ for key in AnsibleAWSModule.default_settings:
+ try:
+ local_settings[key] = kwargs.pop(key)
+ except KeyError:
+ local_settings[key] = AnsibleAWSModule.default_settings[key]
+ self.settings = local_settings
+
+ if local_settings["default_args"]:
+            # ec2_argument_spec contains the region, so we use that; there's a
+            # patch coming which will add it to aws_argument_spec, and if that's
+            # accepted we should switch over later.
+ argument_spec_full = ec2_argument_spec()
+ try:
+ argument_spec_full.update(kwargs["argument_spec"])
+ except (TypeError, NameError):
+ pass
+ kwargs["argument_spec"] = argument_spec_full
+
+ self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
+
+ if local_settings["check_boto3"] and not HAS_BOTO3:
+ self._module.fail_json(
+ msg=missing_required_lib('botocore or boto3'))
+
+ self.check_mode = self._module.check_mode
+ self._diff = self._module._diff
+ self._name = self._module._name
+
+ self._botocore_endpoint_log_stream = StringIO()
+ self.logger = None
+ if self.params.get('debug_botocore_endpoint_logs'):
+ self.logger = logging.getLogger('botocore.endpoint')
+ self.logger.setLevel(logging.DEBUG)
+ self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
+
+ @property
+ def params(self):
+ return self._module.params
+
+ def _get_resource_action_list(self):
+ actions = []
+ for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
+ ln = ln.strip()
+ if not ln:
+ continue
+ found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
+ if found_operational_request:
+ operation_request = found_operational_request.group(0)[20:-1]
+ resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
+ actions.append("{0}:{1}".format(resource, operation_request))
+ return list(set(actions))
+
+ def exit_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.exit_json(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.fail_json(*args, **kwargs)
+
+ def debug(self, *args, **kwargs):
+ return self._module.debug(*args, **kwargs)
+
+ def warn(self, *args, **kwargs):
+ return self._module.warn(*args, **kwargs)
+
+ def deprecate(self, *args, **kwargs):
+ return self._module.deprecate(*args, **kwargs)
+
+ def boolean(self, *args, **kwargs):
+ return self._module.boolean(*args, **kwargs)
+
+ def md5(self, *args, **kwargs):
+ return self._module.md5(*args, **kwargs)
+
+ def client(self, service, retry_decorator=None):
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ conn = boto3_conn(self, conn_type='client', resource=service,
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)
+
+ def resource(self, service):
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ return boto3_conn(self, conn_type='resource', resource=service,
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+
+ @property
+ def region(self, boto3=True):
+ return get_aws_region(self, boto3)
+
+ def fail_json_aws(self, exception, msg=None):
+ """call fail_json with processed exception
+
+ function for converting exceptions thrown by AWS SDK modules,
+ botocore, boto3 and boto, into nice error messages.
+ """
+ last_traceback = traceback.format_exc()
+
+ # to_native is trusted to handle exceptions that str() could
+ # convert to text.
+ try:
+ except_msg = to_native(exception.message)
+ except AttributeError:
+ except_msg = to_native(exception)
+
+ if msg is not None:
+ message = '{0}: {1}'.format(msg, except_msg)
+ else:
+ message = except_msg
+
+ try:
+ response = exception.response
+ except AttributeError:
+ response = None
+
+ failure = dict(
+ msg=message,
+ exception=last_traceback,
+ **self._gather_versions()
+ )
+
+ if response is not None:
+ failure.update(**camel_dict_to_snake_dict(response))
+
+ self.fail_json(**failure)
+
+ def _gather_versions(self):
+ """Gather AWS SDK (boto3 and botocore) dependency versions
+
+        Returns {'boto3_version': str, 'botocore_version': str}
+        Returns {} if boto3 is not installed
+ """
+ if not HAS_BOTO3:
+ return {}
+ import boto3
+ import botocore
+ return dict(boto3_version=boto3.__version__,
+ botocore_version=botocore.__version__)
+
+ def boto3_at_least(self, desired):
+ """Check if the available boto3 version is greater than or equal to a desired version.
+
+ Usage:
+ if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
+ # conditionally fail on old boto3 versions if a specific feature is not supported
+ module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)
+
+ def botocore_at_least(self, desired):
+ """Check if the available botocore version is greater than or equal to a desired version.
+
+ Usage:
+ if not module.botocore_at_least('1.2.3'):
+ module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
+ if not module.botocore_at_least('1.5.3'):
+ module.warn('Botocore did not include waiters for Service X before 1.5.3. '
+ 'To wait until Service X resources are fully available, update botocore.')
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
+
+
+class _RetryingBotoClientWrapper(object):
+ __never_wait = (
+ 'get_paginator', 'can_paginate',
+ 'get_waiter', 'generate_presigned_url',
+ )
+
+ def __init__(self, client, retry):
+ self.client = client
+ self.retry = retry
+
+ def _create_optional_retry_wrapper_function(self, unwrapped):
+ retrying_wrapper = self.retry(unwrapped)
+
+ @wraps(unwrapped)
+ def deciding_wrapper(aws_retry=False, *args, **kwargs):
+ if aws_retry:
+ return retrying_wrapper(*args, **kwargs)
+ else:
+ return unwrapped(*args, **kwargs)
+ return deciding_wrapper
+
+ def __getattr__(self, name):
+ unwrapped = getattr(self.client, name)
+ if name in self.__never_wait:
+ return unwrapped
+ elif callable(unwrapped):
+ wrapped = self._create_optional_retry_wrapper_function(unwrapped)
+ setattr(self, name, wrapped)
+ return wrapped
+ else:
+ return unwrapped
+
+
+def is_boto3_error_code(code, e=None):
+ """Check if the botocore exception is raised by a specific error code.
+
+    Returns ClientError if the error code matches; otherwise returns a dummy
+    exception class that will never be raised.
+
+    Example:
+    try:
+        ec2.describe_instances(InstanceIds=['potato'])
+    except is_boto3_error_code('InvalidInstanceID.Malformed'):
+        # handle the error for that code case
+        pass
+    except botocore.exceptions.ClientError as e:
+        # handle the generic error case for all other codes
+        pass
+    """
+ from botocore.exceptions import ClientError
+ if e is None:
+ import sys
+ dummy, e, dummy = sys.exc_info()
+ if isinstance(e, ClientError) and e.response['Error']['Code'] == code:
+ return ClientError
+ return type('NeverEverRaisedException', (Exception,), {})
+
+
+def get_boto3_client_method_parameters(client, method_name, required=False):
+ op = client.meta.method_to_api_mapping.get(method_name)
+ input_shape = client._service_model.operation_model(op).input_shape
+ if not input_shape:
+ parameters = []
+ elif required:
+ parameters = list(input_shape.required_members)
+ else:
+ parameters = list(input_shape.members.keys())
+ return parameters
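
A minimal sketch of how a module might combine the pieces above; the
argument_spec contents and the 'name' option are illustrative assumptions,
not part of this file:

    from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
    from ansible.module_utils.ec2 import AWSRetry

    module = AnsibleAWSModule(argument_spec=dict(name=dict(type='str')),
                              supports_check_mode=True)
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
    try:
        ec2.describe_instances(aws_retry=True)  # retried on transient errors
    except is_boto3_error_code('UnauthorizedOperation'):
        module.fail_json(msg='missing ec2:DescribeInstances permission')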
diff --git a/test/support/integration/plugins/module_utils/aws/iam.py b/test/support/integration/plugins/module_utils/aws/iam.py
new file mode 100644
index 00000000..f05999aa
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/iam.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import traceback
+
+try:
+ from botocore.exceptions import ClientError, NoCredentialsError
+except ImportError:
+ pass # caught by HAS_BOTO3
+
+from ansible.module_utils._text import to_native
+
+
+def get_aws_account_id(module):
+ """ Given AnsibleAWSModule instance, get the active AWS account ID
+
+    get_account_id tries to find out the account that we are working
+    on. It's not guaranteed that this will be easy, so we try in
+    several different ways. Giving either IAM or STS privileges to
+    the account should be enough to permit this.
+ """
+ account_id = None
+ try:
+ sts_client = module.client('sts')
+ account_id = sts_client.get_caller_identity().get('Account')
+ # non-STS sessions may also get NoCredentialsError from this STS call, so
+ # we must catch that too and try the IAM version
+ except (ClientError, NoCredentialsError):
+ try:
+ iam_client = module.client('iam')
+ account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
+ except ClientError as e:
+ if (e.response['Error']['Code'] == 'AccessDenied'):
+ except_msg = to_native(e)
+ # don't match on `arn:aws` because of China region `arn:aws-cn` and similar
+                    account_id = re.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/", except_msg).group(1)
+ if account_id is None:
+ module.fail_json_aws(e, msg="Could not get AWS account information")
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
+ exception=traceback.format_exc()
+ )
+ if not account_id:
+ module.fail_json(msg="Failed while determining AWS account ID. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.")
+ return to_native(account_id)
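
A hedged usage sketch for the helper above; a module would typically call it
right after construction (the empty argument_spec is illustrative):

    from ansible.module_utils.aws.core import AnsibleAWSModule
    from ansible.module_utils.aws.iam import get_aws_account_id

    module = AnsibleAWSModule(argument_spec=dict())
    account_id = get_aws_account_id(module)  # e.g. '123456789012'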
diff --git a/test/support/integration/plugins/module_utils/aws/s3.py b/test/support/integration/plugins/module_utils/aws/s3.py
new file mode 100644
index 00000000..2185869d
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/s3.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by the calling module
+
+HAS_MD5 = True
+try:
+ from hashlib import md5
+except ImportError:
+ try:
+ from md5 import md5
+ except ImportError:
+ HAS_MD5 = False
+
+
+def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
+ if not HAS_MD5:
+ return None
+
+ if '-' in etag:
+ # Multi-part ETag; a hash of the hashes of each part.
+ parts = int(etag[1:-1].split('-')[1])
+ digests = []
+
+ s3_kwargs = dict(
+ Bucket=bucket,
+ Key=obj,
+ )
+ if version:
+ s3_kwargs['VersionId'] = version
+
+ with open(filename, 'rb') as f:
+ for part_num in range(1, parts + 1):
+ s3_kwargs['PartNumber'] = part_num
+ try:
+ head = s3.head_object(**s3_kwargs)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get head object")
+ digests.append(md5(f.read(int(head['ContentLength']))))
+
+ digest_squared = md5(b''.join(m.digest() for m in digests))
+ return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+ else: # Compute the MD5 sum normally
+ return '"{0}"'.format(module.md5(filename))
diff --git a/test/support/integration/plugins/module_utils/aws/waiters.py b/test/support/integration/plugins/module_utils/aws/waiters.py
new file mode 100644
index 00000000..25db598b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/waiters.py
@@ -0,0 +1,405 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore.waiter as core_waiter
+except ImportError:
+ pass # caught by HAS_BOTO3
+
+
+ec2_data = {
+ "version": 2,
+ "waiters": {
+ "InternetGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeInternetGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(InternetGateways) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInternetGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "RouteTableExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeRouteTables",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(RouteTables[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidRouteTableID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SecurityGroupExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSecurityGroups",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(SecurityGroups[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidGroup.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetHasMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetHasAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetDeleted": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "retry"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "success"
+ },
+ ]
+ },
+ "VpnGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(VpnGateways[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpnGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "VpnGatewayDetached": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "VpnGateways[0].State == 'available'",
+ "state": "success"
+ },
+ ]
+ },
+ }
+}
+
+
+waf_data = {
+ "version": 2,
+ "waiters": {
+ "ChangeTokenInSync": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "GetChangeTokenStatus",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "ChangeTokenStatus == 'INSYNC'",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "WAFInternalErrorException",
+ "state": "retry"
+ }
+ ]
+ }
+ }
+}
+
+eks_data = {
+ "version": 2,
+ "waiters": {
+ "ClusterActive": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "cluster.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "ClusterDeleted": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "cluster.status != 'DELETED'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ }
+ }
+}
+
+
+rds_data = {
+ "version": 2,
+ "waiters": {
+ "DBInstanceStopped": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeDBInstances",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBInstances[].DBInstanceStatus",
+ "expected": "stopped"
+ },
+ ]
+ }
+ }
+}
+
+
+def ec2_model(name):
+ ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
+ return ec2_models.get_waiter(name)
+
+
+def waf_model(name):
+ waf_models = core_waiter.WaiterModel(waiter_config=waf_data)
+ return waf_models.get_waiter(name)
+
+
+def eks_model(name):
+ eks_models = core_waiter.WaiterModel(waiter_config=eks_data)
+ return eks_models.get_waiter(name)
+
+
+def rds_model(name):
+ rds_models = core_waiter.WaiterModel(waiter_config=rds_data)
+ return rds_models.get_waiter(name)
+
+
+waiters_by_name = {
+ ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'internet_gateway_exists',
+ ec2_model('InternetGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_internet_gateways
+ )),
+ ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
+ 'route_table_exists',
+ ec2_model('RouteTableExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_route_tables
+ )),
+ ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
+ 'security_group_exists',
+ ec2_model('SecurityGroupExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_security_groups
+ )),
+ ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
+ 'subnet_exists',
+ ec2_model('SubnetExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_map_public',
+ ec2_model('SubnetHasMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_map_public',
+ ec2_model('SubnetNoMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_assign_ipv6',
+ ec2_model('SubnetHasAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_assign_ipv6',
+ ec2_model('SubnetNoAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
+ 'subnet_deleted',
+ ec2_model('SubnetDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_exists',
+ ec2_model('VpnGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_detached',
+ ec2_model('VpnGatewayDetached'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
+ 'cluster_active',
+ eks_model('ClusterActive'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
+ 'cluster_deleted',
+ eks_model('ClusterDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
+ 'db_instance_stopped',
+ rds_model('DBInstanceStopped'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_instances
+ )),
+}
+
+
+def get_waiter(client, waiter_name):
+ try:
+ return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
+ except KeyError:
+ raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
+ waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
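
The lookup key is the client's class name plus the snake_case waiter name, so
a subnet wait might look like the sketch below (ec2_client is assumed to be a
boto3 EC2 client; the subnet ID is a made-up value):

    from ansible.module_utils.aws.waiters import get_waiter

    waiter = get_waiter(ec2_client, 'subnet_exists')
    waiter.wait(SubnetIds=['subnet-0123456789abcdef0'])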
diff --git a/test/support/integration/plugins/module_utils/azure_rm_common.py b/test/support/integration/plugins/module_utils/azure_rm_common.py
new file mode 100644
index 00000000..a7b55e97
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/azure_rm_common.py
@@ -0,0 +1,1473 @@
+# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
+# Chris Houseknecht, <house@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+import os
+import re
+import types
+import copy
+import inspect
+import traceback
+import json
+
+from os.path import expanduser
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+try:
+ from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+except Exception:
+ ANSIBLE_VERSION = 'unknown'
+from ansible.module_utils.six.moves import configparser
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+AZURE_COMMON_ARGS = dict(
+ auth_source=dict(
+ type='str',
+ choices=['auto', 'cli', 'env', 'credential_file', 'msi']
+ ),
+ profile=dict(type='str'),
+ subscription_id=dict(type='str'),
+ client_id=dict(type='str', no_log=True),
+ secret=dict(type='str', no_log=True),
+ tenant=dict(type='str', no_log=True),
+ ad_user=dict(type='str', no_log=True),
+ password=dict(type='str', no_log=True),
+ cloud_environment=dict(type='str', default='AzureCloud'),
+ cert_validation_mode=dict(type='str', choices=['validate', 'ignore']),
+ api_profile=dict(type='str', default='latest'),
+ adfs_authority_url=dict(type='str', default=None)
+)
+
+AZURE_CREDENTIAL_ENV_MAPPING = dict(
+ profile='AZURE_PROFILE',
+ subscription_id='AZURE_SUBSCRIPTION_ID',
+ client_id='AZURE_CLIENT_ID',
+ secret='AZURE_SECRET',
+ tenant='AZURE_TENANT',
+ ad_user='AZURE_AD_USER',
+ password='AZURE_PASSWORD',
+ cloud_environment='AZURE_CLOUD_ENVIRONMENT',
+ cert_validation_mode='AZURE_CERT_VALIDATION_MODE',
+ adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
+)
+
+
+class SDKProfile(object): # pylint: disable=too-few-public-methods
+
+ def __init__(self, default_api_version, profile=None):
+ """Constructor.
+
+ :param str default_api_version: Default API version if not overridden by a profile. Nullable.
+        :param profile: A dict mapping operation group name to API version.
+ :type profile: dict[str, str]
+ """
+ self.profile = profile if profile is not None else {}
+ self.profile[None] = default_api_version
+
+ @property
+ def default_api_version(self):
+ return self.profile[None]
+
+
+# FUTURE: this should come from the SDK or an external location.
+# For now, we have to copy from azure-cli
+AZURE_API_PROFILES = {
+ 'latest': {
+ 'ContainerInstanceManagementClient': '2018-02-01-preview',
+ 'ComputeManagementClient': dict(
+ default_api_version='2018-10-01',
+ resource_skus='2018-10-01',
+ disks='2018-06-01',
+ snapshots='2018-10-01',
+ virtual_machine_run_commands='2018-10-01'
+ ),
+ 'NetworkManagementClient': '2018-08-01',
+ 'ResourceManagementClient': '2017-05-10',
+ 'StorageManagementClient': '2017-10-01',
+ 'WebSiteManagementClient': '2018-02-01',
+ 'PostgreSQLManagementClient': '2017-12-01',
+ 'MySQLManagementClient': '2017-12-01',
+ 'MariaDBManagementClient': '2019-03-01',
+ 'ManagementLockClient': '2016-09-01'
+ },
+ '2019-03-01-hybrid': {
+ 'StorageManagementClient': '2017-10-01',
+ 'NetworkManagementClient': '2017-10-01',
+ 'ComputeManagementClient': SDKProfile('2017-12-01', {
+ 'resource_skus': '2017-09-01',
+ 'disks': '2017-03-30',
+ 'snapshots': '2017-03-30'
+ }),
+ 'ManagementLinkClient': '2016-09-01',
+ 'ManagementLockClient': '2016-09-01',
+ 'PolicyClient': '2016-12-01',
+ 'ResourceManagementClient': '2018-05-01',
+ 'SubscriptionClient': '2016-06-01',
+ 'DnsManagementClient': '2016-04-01',
+ 'KeyVaultManagementClient': '2016-10-01',
+ 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
+ 'classic_administrators': '2015-06-01',
+ 'policy_assignments': '2016-12-01',
+ 'policy_definitions': '2016-12-01'
+ }),
+ 'KeyVaultClient': '2016-10-01',
+ 'azure.multiapi.storage': '2017-11-09',
+ 'azure.multiapi.cosmosdb': '2017-04-17'
+ },
+ '2018-03-01-hybrid': {
+ 'StorageManagementClient': '2016-01-01',
+ 'NetworkManagementClient': '2017-10-01',
+ 'ComputeManagementClient': SDKProfile('2017-03-30'),
+ 'ManagementLinkClient': '2016-09-01',
+ 'ManagementLockClient': '2016-09-01',
+ 'PolicyClient': '2016-12-01',
+ 'ResourceManagementClient': '2018-02-01',
+ 'SubscriptionClient': '2016-06-01',
+ 'DnsManagementClient': '2016-04-01',
+ 'KeyVaultManagementClient': '2016-10-01',
+ 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
+ 'classic_administrators': '2015-06-01'
+ }),
+ 'KeyVaultClient': '2016-10-01',
+ 'azure.multiapi.storage': '2017-04-17',
+ 'azure.multiapi.cosmosdb': '2017-04-17'
+ },
+ '2017-03-09-profile': {
+ 'StorageManagementClient': '2016-01-01',
+ 'NetworkManagementClient': '2015-06-15',
+ 'ComputeManagementClient': SDKProfile('2016-03-30'),
+ 'ManagementLinkClient': '2016-09-01',
+ 'ManagementLockClient': '2015-01-01',
+ 'PolicyClient': '2015-10-01-preview',
+ 'ResourceManagementClient': '2016-02-01',
+ 'SubscriptionClient': '2016-06-01',
+ 'DnsManagementClient': '2016-04-01',
+ 'KeyVaultManagementClient': '2016-10-01',
+ 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
+ 'classic_administrators': '2015-06-01'
+ }),
+ 'KeyVaultClient': '2016-10-01',
+ 'azure.multiapi.storage': '2015-04-05'
+ }
+}
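+
+# To illustrate the shape above: get_api_profile() below wraps plain string
+# entries into dict(default_api_version=<str>), so with this table
+#
+#     self.get_api_profile('NetworkManagementClient', 'latest')
+#
+# would resolve to {'default_api_version': '2018-08-01'}.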
+
+AZURE_TAG_ARGS = dict(
+ tags=dict(type='dict'),
+ append_tags=dict(type='bool', default=True),
+)
+
+AZURE_COMMON_REQUIRED_IF = [
+ ('log_mode', 'file', ['log_path'])
+]
+
+ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
+CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT'
+VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT'
+
+CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
+ r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))")
+
+AZURE_SUCCESS_STATE = "Succeeded"
+AZURE_FAILED_STATE = "Failed"
+
+HAS_AZURE = True
+HAS_AZURE_EXC = None
+HAS_AZURE_CLI_CORE = True
+HAS_AZURE_CLI_CORE_EXC = None
+
+HAS_MSRESTAZURE = True
+HAS_MSRESTAZURE_EXC = None
+
+try:
+ import importlib
+except ImportError:
+ # This passes the sanity import test, but does not provide a user friendly error message.
+ # Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils.
+ importlib = None
+
+try:
+ from packaging.version import Version
+ HAS_PACKAGING_VERSION = True
+ HAS_PACKAGING_VERSION_EXC = None
+except ImportError:
+ Version = None
+ HAS_PACKAGING_VERSION = False
+ HAS_PACKAGING_VERSION_EXC = traceback.format_exc()
+
+# NB: a packaging issue sometimes causes msrestazure not to be installed; check it separately
+try:
+ from msrest.serialization import Serializer
+except ImportError:
+ HAS_MSRESTAZURE_EXC = traceback.format_exc()
+ HAS_MSRESTAZURE = False
+
+try:
+ from enum import Enum
+ from msrestazure.azure_active_directory import AADTokenCredentials
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_active_directory import MSIAuthentication
+ from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id
+ from msrestazure import azure_cloud
+ from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
+ from azure.mgmt.monitor.version import VERSION as monitor_client_version
+ from azure.mgmt.network.version import VERSION as network_client_version
+ from azure.mgmt.storage.version import VERSION as storage_client_version
+ from azure.mgmt.compute.version import VERSION as compute_client_version
+ from azure.mgmt.resource.version import VERSION as resource_client_version
+ from azure.mgmt.dns.version import VERSION as dns_client_version
+ from azure.mgmt.web.version import VERSION as web_client_version
+ from azure.mgmt.network import NetworkManagementClient
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from azure.mgmt.resource.subscriptions import SubscriptionClient
+ from azure.mgmt.storage import StorageManagementClient
+ from azure.mgmt.compute import ComputeManagementClient
+ from azure.mgmt.dns import DnsManagementClient
+ from azure.mgmt.monitor import MonitorManagementClient
+ from azure.mgmt.web import WebSiteManagementClient
+ from azure.mgmt.containerservice import ContainerServiceClient
+ from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements
+ from azure.mgmt.trafficmanager import TrafficManagerManagementClient
+ from azure.storage.cloudstorageaccount import CloudStorageAccount
+ from azure.storage.blob import PageBlobService, BlockBlobService
+ from adal.authentication_context import AuthenticationContext
+ from azure.mgmt.sql import SqlManagementClient
+ from azure.mgmt.servicebus import ServiceBusManagementClient
+ import azure.mgmt.servicebus.models as ServicebusModel
+ from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
+ from azure.mgmt.rdbms.mysql import MySQLManagementClient
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from azure.mgmt.containerregistry import ContainerRegistryManagementClient
+ from azure.mgmt.containerinstance import ContainerInstanceManagementClient
+ from azure.mgmt.loganalytics import LogAnalyticsManagementClient
+ import azure.mgmt.loganalytics.models as LogAnalyticsModels
+ from azure.mgmt.automation import AutomationClient
+ import azure.mgmt.automation.models as AutomationModel
+ from azure.mgmt.iothub import IotHubClient
+ from azure.mgmt.iothub import models as IoTHubModels
+ from msrest.service_client import ServiceClient
+ from msrestazure import AzureConfiguration
+ from msrest.authentication import Authentication
+ from azure.mgmt.resource.locks import ManagementLockClient
+except ImportError as exc:
+ Authentication = object
+ HAS_AZURE_EXC = traceback.format_exc()
+ HAS_AZURE = False
+
+from base64 import b64encode, b64decode
+from hashlib import sha256
+from hmac import HMAC
+from time import time
+
+try:
+ from urllib import (urlencode, quote_plus)
+except ImportError:
+ from urllib.parse import (urlencode, quote_plus)
+
+try:
+ from azure.cli.core.util import CLIError
+ from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
+ from azure.common.cloud import get_cli_active_cloud
+except ImportError:
+ HAS_AZURE_CLI_CORE = False
+ HAS_AZURE_CLI_CORE_EXC = None
+ CLIError = Exception
+
+
+def azure_id_to_dict(id):
+ pieces = re.sub(r'^\/', '', id).split('/')
+ result = {}
+ index = 0
+ while index < len(pieces) - 1:
+ result[pieces[index]] = pieces[index + 1]
+ index += 1
+ return result
+
+
+def format_resource_id(val, subscription_id, namespace, types, resource_group):
+ return resource_id(name=val,
+ resource_group=resource_group,
+ namespace=namespace,
+ type=types,
+ subscription=subscription_id) if not is_valid_resource_id(val) else val
+
+
+def normalize_location_name(name):
+ return name.replace(' ', '').lower()
+
+
+# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime)
+# or generate the requirements files from this so we only have one source of truth to maintain...
+AZURE_PKG_VERSIONS = {
+ 'StorageManagementClient': {
+ 'package_name': 'storage',
+ 'expected_version': '3.1.0'
+ },
+ 'ComputeManagementClient': {
+ 'package_name': 'compute',
+ 'expected_version': '4.4.0'
+ },
+ 'ContainerInstanceManagementClient': {
+ 'package_name': 'containerinstance',
+ 'expected_version': '0.4.0'
+ },
+ 'NetworkManagementClient': {
+ 'package_name': 'network',
+ 'expected_version': '2.3.0'
+ },
+ 'ResourceManagementClient': {
+ 'package_name': 'resource',
+ 'expected_version': '2.1.0'
+ },
+ 'DnsManagementClient': {
+ 'package_name': 'dns',
+ 'expected_version': '2.1.0'
+ },
+ 'WebSiteManagementClient': {
+ 'package_name': 'web',
+ 'expected_version': '0.41.0'
+ },
+ 'TrafficManagerManagementClient': {
+ 'package_name': 'trafficmanager',
+ 'expected_version': '0.50.0'
+ },
+} if HAS_AZURE else {}
+
+
+AZURE_MIN_RELEASE = '2.0.0'
+
+
+class AzureRMModuleBase(object):
+ def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
+ mutually_exclusive=None, required_together=None,
+ required_one_of=None, add_file_common_args=False, supports_check_mode=False,
+ required_if=None, supports_tags=True, facts_module=False, skip_exec=False):
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(AZURE_COMMON_ARGS)
+ if supports_tags:
+ merged_arg_spec.update(AZURE_TAG_ARGS)
+
+ if derived_arg_spec:
+ merged_arg_spec.update(derived_arg_spec)
+
+ merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
+ if required_if:
+ merged_required_if += required_if
+
+ self.module = AnsibleModule(argument_spec=merged_arg_spec,
+ bypass_checks=bypass_checks,
+ no_log=no_log,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
+ required_one_of=required_one_of,
+ add_file_common_args=add_file_common_args,
+ supports_check_mode=supports_check_mode,
+ required_if=merged_required_if)
+
+ if not HAS_PACKAGING_VERSION:
+ self.fail(msg=missing_required_lib('packaging'),
+ exception=HAS_PACKAGING_VERSION_EXC)
+
+ if not HAS_MSRESTAZURE:
+ self.fail(msg=missing_required_lib('msrestazure'),
+ exception=HAS_MSRESTAZURE_EXC)
+
+ if not HAS_AZURE:
+ self.fail(msg=missing_required_lib('ansible[azure] (azure >= {0})'.format(AZURE_MIN_RELEASE)),
+ exception=HAS_AZURE_EXC)
+
+ self._network_client = None
+ self._storage_client = None
+ self._resource_client = None
+ self._compute_client = None
+ self._dns_client = None
+ self._web_client = None
+ self._marketplace_client = None
+ self._sql_client = None
+ self._mysql_client = None
+ self._mariadb_client = None
+ self._postgresql_client = None
+ self._containerregistry_client = None
+ self._containerinstance_client = None
+ self._containerservice_client = None
+ self._managedcluster_client = None
+ self._traffic_manager_management_client = None
+ self._monitor_client = None
+ self._resource = None
+ self._log_analytics_client = None
+ self._servicebus_client = None
+ self._automation_client = None
+ self._IoThub_client = None
+ self._lock_client = None
+
+ self.check_mode = self.module.check_mode
+ self.api_profile = self.module.params.get('api_profile')
+ self.facts_module = facts_module
+ # self.debug = self.module.params.get('debug')
+
+ # delegate auth to AzureRMAuth class (shared with all plugin types)
+ self.azure_auth = AzureRMAuth(fail_impl=self.fail, **self.module.params)
+
+ # common parameter validation
+ if self.module.params.get('tags'):
+ self.validate_tags(self.module.params['tags'])
+
+ if not skip_exec:
+ res = self.exec_module(**self.module.params)
+ self.module.exit_json(**res)
+
+ def check_client_version(self, client_type):
+ # Ensure Azure modules are at least 2.0.0rc5.
+ package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None)
+ if package_version is not None:
+ client_name = package_version.get('package_name')
+ try:
+ client_module = importlib.import_module(client_type.__module__)
+ client_version = client_module.VERSION
+ except (RuntimeError, AttributeError):
+            # can't get at the module version for some reason; just skip the check silently...
+ return
+ expected_version = package_version.get('expected_version')
+ if Version(client_version) < Version(expected_version):
+ self.fail("Installed azure-mgmt-{0} client version is {1}. The minimum supported version is {2}. Try "
+ "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
+ if Version(client_version) != Version(expected_version):
+ self.module.warn("Installed azure-mgmt-{0} client version is {1}. The expected version is {2}. Try "
+ "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
+
+ def exec_module(self, **kwargs):
+ self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
+
+ def fail(self, msg, **kwargs):
+ '''
+        Shortcut for calling module.fail_json()
+
+ :param msg: Error message text.
+ :param kwargs: Any key=value pairs
+ :return: None
+ '''
+ self.module.fail_json(msg=msg, **kwargs)
+
+ def deprecate(self, msg, version=None, collection_name=None):
+ self.module.deprecate(msg, version, collection_name=collection_name)
+
+ def log(self, msg, pretty_print=False):
+ if pretty_print:
+ self.module.debug(json.dumps(msg, indent=4, sort_keys=True))
+ else:
+ self.module.debug(msg)
+
+ def validate_tags(self, tags):
+ '''
+ Check if tags dictionary contains string:string pairs.
+
+ :param tags: dictionary of string:string pairs
+ :return: None
+ '''
+ if not self.facts_module:
+ if not isinstance(tags, dict):
+ self.fail("Tags must be a dictionary of string:string values.")
+ for key, value in tags.items():
+ if not isinstance(value, str):
+ self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
+
+ def update_tags(self, tags):
+ '''
+ Call from the module to update metadata tags. Returns tuple
+ with bool indicating if there was a change and dict of new
+ tags to assign to the object.
+
+ :param tags: metadata tags from the object
+ :return: bool, dict
+ '''
+ tags = tags or dict()
+ new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
+ param_tags = self.module.params.get('tags') if isinstance(self.module.params.get('tags'), dict) else dict()
+ append_tags = self.module.params.get('append_tags') if self.module.params.get('append_tags') is not None else True
+ changed = False
+ # check add or update
+ for key, value in param_tags.items():
+ if not new_tags.get(key) or new_tags[key] != value:
+ changed = True
+ new_tags[key] = value
+ # check remove
+ if not append_tags:
+ for key, value in tags.items():
+ if not param_tags.get(key):
+ new_tags.pop(key)
+ changed = True
+ return changed, new_tags
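+
+    # A minimal sketch of how a module might consume update_tags (the resource
+    # object `vm` and the follow-up update call are illustrative, not part of
+    # this class):
+    #
+    #     changed, new_tags = self.update_tags(vm.tags)
+    #     if changed and not self.check_mode:
+    #         vm.tags = new_tags  # then push the updated object to Azure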
+
+ def has_tags(self, obj_tags, tag_list):
+ '''
+        Used in fact modules to compare object tags to a list of parameter tags. Returns True if all
+        of the parameter tags exist in the object's tags.
+
+ :param obj_tags: dictionary of tags from an Azure object.
+ :param tag_list: list of tag keys or tag key:value pairs
+ :return: bool
+ '''
+
+ if not obj_tags and tag_list:
+ return False
+
+ if not tag_list:
+ return True
+
+ matches = 0
+ result = False
+ for tag in tag_list:
+ tag_key = tag
+ tag_value = None
+ if ':' in tag:
+ tag_key, tag_value = tag.split(':')
+ if tag_value and obj_tags.get(tag_key) == tag_value:
+ matches += 1
+ elif not tag_value and obj_tags.get(tag_key):
+ matches += 1
+ if matches == len(tag_list):
+ result = True
+ return result
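+
+    # For example (hypothetical values): has_tags({'env': 'prod', 'owner': 'alice'},
+    # ['env', 'owner:alice']) returns True, while ['owner:bob'] would not match.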
+
+ def get_resource_group(self, resource_group):
+ '''
+ Fetch a resource group.
+
+ :param resource_group: name of a resource group
+ :return: resource group object
+ '''
+ try:
+ return self.rm_client.resource_groups.get(resource_group)
+ except CloudError as cloud_error:
+ self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message))
+ except Exception as exc:
+ self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
+
+ def parse_resource_to_dict(self, resource):
+ '''
+        Return a dict of the given resource, which contains name and resource group.
+
+        :param resource: Can be a resource name, id or a dict containing name and resource group.
+ '''
+ resource_dict = parse_resource_id(resource) if not isinstance(resource, dict) else resource
+ resource_dict['resource_group'] = resource_dict.get('resource_group', self.resource_group)
+ resource_dict['subscription_id'] = resource_dict.get('subscription_id', self.subscription_id)
+ return resource_dict
+
+ def serialize_obj(self, obj, class_name, enum_modules=None):
+ '''
+ Return a JSON representation of an Azure object.
+
+ :param obj: Azure object
+ :param class_name: Name of the object's class
+ :param enum_modules: List of module names to build enum dependencies from.
+ :return: serialized result
+ '''
+ enum_modules = [] if enum_modules is None else enum_modules
+
+ dependencies = dict()
+ if enum_modules:
+ for module_name in enum_modules:
+ mod = importlib.import_module(module_name)
+ for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
+ dependencies[mod_class_name] = mod_class_obj
+ self.log("dependencies: ")
+ self.log(str(dependencies))
+ serializer = Serializer(classes=dependencies)
+ return serializer.body(obj, class_name, keep_readonly=True)
+
+ def get_poller_result(self, poller, wait=5):
+ '''
+ Consistent method of waiting on and retrieving results from Azure's long poller
+
+        :param poller: Azure poller object
+        :param wait: delay, in seconds, between polls
+        :return: object resulting from the original request
+ '''
+ try:
+ delay = wait
+ while not poller.done():
+ self.log("Waiting for {0} sec".format(delay))
+ poller.wait(timeout=delay)
+ return poller.result()
+ except Exception as exc:
+ self.log(str(exc))
+ raise
+
+ def check_provisioning_state(self, azure_object, requested_state='present'):
+ '''
+ Check an Azure object's provisioning state. If something did not complete the provisioning
+ process, then we cannot operate on it.
+
+        :param azure_object: An object such as a subnet, storage account, etc. Must have provisioning_state
+          and name attributes.
+        :return: None
+ '''
+
+ if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
+ hasattr(azure_object, 'name'):
+ # resource group object fits this model
+ if isinstance(azure_object.properties.provisioning_state, Enum):
+ if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
+ requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
+ return
+ if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
+ requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
+ return
+
+ if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
+ if isinstance(azure_object.provisioning_state, Enum):
+ if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
+ return
+ if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
+
+ def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):
+ keys = dict()
+ try:
+ # Get keys from the storage account
+ self.log('Getting keys')
+ account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
+ except Exception as exc:
+ self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
+
+ try:
+ self.log('Create blob service')
+ if storage_blob_type == 'page':
+ return PageBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
+ account_name=storage_account_name,
+ account_key=account_keys.keys[0].value)
+ elif storage_blob_type == 'block':
+ return BlockBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
+ account_name=storage_account_name,
+ account_key=account_keys.keys[0].value)
+ else:
+ raise Exception("Invalid storage blob type defined.")
+ except Exception as exc:
+ self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
+ str(exc)))
+
+ def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic', sku=None):
+ '''
+ Create a default public IP address <public_ip_name> to associate with a network interface.
+ If a PIP address matching <public_ip_name> exists, return it. Otherwise, create one.
+
+ :param resource_group: name of an existing resource group
+        :param location: a valid Azure location
+ :param public_ip_name: base name to assign the public IP address
+ :param allocation_method: one of 'Static' or 'Dynamic'
+        :param sku: SKU to assign to the public IP address, if any
+ :return: PIP object
+ '''
+ pip = None
+
+ self.log("Starting create_default_pip {0}".format(public_ip_name))
+ self.log("Check to see if public IP {0} exists".format(public_ip_name))
+ try:
+ pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
+ except CloudError:
+ pass
+
+ if pip:
+ self.log("Public ip {0} found.".format(public_ip_name))
+ self.check_provisioning_state(pip)
+ return pip
+
+ params = self.network_models.PublicIPAddress(
+ location=location,
+ public_ip_allocation_method=allocation_method,
+ sku=sku
+ )
+ self.log('Creating default public IP {0}'.format(public_ip_name))
+ try:
+ poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
+ except Exception as exc:
+ self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
+
+ return self.get_poller_result(poller)
+
+ def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):
+ '''
+ Create a default security group <security_group_name> to associate with a network interface. If a security group matching
+ <security_group_name> exists, return it. Otherwise, create one.
+
+ :param resource_group: Resource group name
+ :param location: azure location name
+ :param security_group_name: base name to use for the security group
+        :param os_type: one of 'Windows' or 'Linux'. Determines any default rules added to the security group.
+        :param open_ports: optional list of inbound ports to open instead of the OS-type defaults
+                           (SSH 22 for Linux; RDP 3389 and WinRM 5986 for Windows).
+ :return: security_group object
+ '''
+ group = None
+
+ self.log("Create security group {0}".format(security_group_name))
+ self.log("Check to see if security group {0} exists".format(security_group_name))
+ try:
+ group = self.network_client.network_security_groups.get(resource_group, security_group_name)
+ except CloudError:
+ pass
+
+ if group:
+ self.log("Security group {0} found.".format(security_group_name))
+ self.check_provisioning_state(group)
+ return group
+
+ parameters = self.network_models.NetworkSecurityGroup()
+ parameters.location = location
+
+ if not open_ports:
+ # Open default ports based on OS type
+ if os_type == 'Linux':
+ # add an inbound SSH rule
+ parameters.security_rules = [
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ description='Allow SSH Access',
+ source_port_range='*',
+ destination_port_range='22',
+ priority=100,
+ name='SSH')
+ ]
+ parameters.location = location
+ else:
+ # for windows add inbound RDP and WinRM rules
+ parameters.security_rules = [
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ description='Allow RDP port 3389',
+ source_port_range='*',
+ destination_port_range='3389',
+ priority=100,
+ name='RDP01'),
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ description='Allow WinRM HTTPS port 5986',
+ source_port_range='*',
+ destination_port_range='5986',
+ priority=101,
+ name='WinRM01'),
+ ]
+ else:
+ # Open custom ports
+ parameters.security_rules = []
+ priority = 100
+ for port in open_ports:
+ priority += 1
+ rule_name = "Rule_{0}".format(priority)
+ parameters.security_rules.append(
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ source_port_range='*',
+ destination_port_range=str(port),
+ priority=priority,
+ name=rule_name)
+ )
+
+ self.log('Creating default security group {0}'.format(security_group_name))
+ try:
+ poller = self.network_client.network_security_groups.create_or_update(resource_group,
+ security_group_name,
+ parameters)
+ except Exception as exc:
+ self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
+
+ return self.get_poller_result(poller)
+
+ @staticmethod
+ def _validation_ignore_callback(session, global_config, local_config, **kwargs):
+ session.verify = False
+
+ def get_api_profile(self, client_type_name, api_profile_name):
+ profile_all_clients = AZURE_API_PROFILES.get(api_profile_name)
+
+ if not profile_all_clients:
+ raise KeyError("unknown Azure API profile: {0}".format(api_profile_name))
+
+ profile_raw = profile_all_clients.get(client_type_name, None)
+
+ if not profile_raw:
+ self.module.warn("Azure API profile {0} does not define an entry for {1}".format(api_profile_name, client_type_name))
+
+ if isinstance(profile_raw, dict):
+ if not profile_raw.get('default_api_version'):
+ raise KeyError("Azure API profile {0} does not define 'default_api_version'".format(api_profile_name))
+ return profile_raw
+
+ # wrap basic strings in a dict that just defines the default
+ return dict(default_api_version=profile_raw)
+
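+    # The profile table consulted above maps a profile name to per-client API
+    # versions, shaped roughly as follows (the values here are illustrative,
+    # not the real table):
+    #
+    #   AZURE_API_PROFILES = {
+    #       'latest': {
+    #           'StorageManagementClient': '2018-07-01',
+    #           'NetworkManagementClient': '2019-06-01',
+    #       }
+    #   }
+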
+ def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None):
+ self.log('Getting management service client {0}'.format(client_type.__name__))
+ self.check_client_version(client_type)
+
+ client_argspec = inspect.getargspec(client_type.__init__)
+
+ if not base_url:
+ # most things are resource_manager, don't make everyone specify
+ base_url = self.azure_auth._cloud_environment.endpoints.resource_manager
+
+ client_kwargs = dict(credentials=self.azure_auth.azure_credentials, subscription_id=self.azure_auth.subscription_id, base_url=base_url)
+
+ api_profile_dict = {}
+
+ if self.api_profile:
+ api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile)
+
+ # unversioned clients won't accept profile; only send it if necessary
+ # clients without a version specified in the profile will use the default
+ if api_profile_dict and 'profile' in client_argspec.args:
+ client_kwargs['profile'] = api_profile_dict
+
+ # If the client doesn't accept api_version, it's unversioned.
+ # If it does, favor explicitly-specified api_version, fall back to api_profile
+ if 'api_version' in client_argspec.args:
+ profile_default_version = api_profile_dict.get('default_api_version', None)
+ if api_version or profile_default_version:
+ client_kwargs['api_version'] = api_version or profile_default_version
+ if 'profile' in client_kwargs:
+ # remove profile; only pass API version if specified
+ client_kwargs.pop('profile')
+
+ client = client_type(**client_kwargs)
+
+ # FUTURE: remove this once everything exposes models directly (eg, containerinstance)
+ try:
+ getattr(client, "models")
+ except AttributeError:
+ def _ansible_get_models(self, *arg, **kwarg):
+ return self._ansible_models
+
+ setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
+ client.models = types.MethodType(_ansible_get_models, client)
+
+ client.config = self.add_user_agent(client.config)
+
+ if self.azure_auth._cert_validation_mode == 'ignore':
+ client.config.session_configuration_callback = self._validation_ignore_callback
+
+ return client
+
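+    # Illustrative (a sketch): the storage_client property below boils down to
+    # a call like this, with the API version pinned by that property.
+    #
+    #   client = self.get_mgmt_svc_client(StorageManagementClient,
+    #                                     base_url=self._cloud_environment.endpoints.resource_manager,
+    #                                     api_version='2018-07-01')
+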
+ def add_user_agent(self, config):
+ # Add user agent for Ansible
+ config.add_user_agent(ANSIBLE_USER_AGENT)
+ # Add user agent when running from Cloud Shell
+ if CLOUDSHELL_USER_AGENT_KEY in os.environ:
+ config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY])
+ # Add user agent when running from VSCode extension
+ if VSCODEEXT_USER_AGENT_KEY in os.environ:
+ config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY])
+ return config
+
+    def generate_sas_token(self, **kwargs):
+        base_url = kwargs.get('base_url', None)
+        expiry = kwargs.get('expiry', time() + 3600)
+        key = kwargs.get('key', None)
+        policy = kwargs.get('policy', None)
+ url = quote_plus(base_url)
+ ttl = int(expiry)
+ sign_key = '{0}\n{1}'.format(url, ttl)
+ signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())
+ result = {
+ 'sr': url,
+ 'sig': signature,
+ 'se': str(ttl),
+ }
+ if policy:
+ result['skn'] = policy
+ return 'SharedAccessSignature ' + urlencode(result)
+
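+    # Illustrative only (a sketch): generating a token for a hypothetical
+    # Service Bus namespace. `key` must be the base64-encoded shared access
+    # key; `expiry` defaults to one hour from now.
+    #
+    #   token = self.generate_sas_token(base_url='myns.servicebus.windows.net',
+    #                                   key='<base64 key>',
+    #                                   policy='RootManageSharedAccessKey')
+    #   # -> 'SharedAccessSignature sr=...&sig=...&se=...&skn=...'
+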
+    def get_data_svc_client(self, **kwargs):
+        url = kwargs.get('base_url', None)
+        config = AzureConfiguration(base_url='https://{0}'.format(url))
+        config.credentials = AzureSASAuthentication(token=self.generate_sas_token(**kwargs))
+ config = self.add_user_agent(config)
+ return ServiceClient(creds=config.credentials, config=config)
+
+ # passthru methods to AzureAuth instance for backcompat
+ @property
+ def credentials(self):
+ return self.azure_auth.credentials
+
+ @property
+ def _cloud_environment(self):
+ return self.azure_auth._cloud_environment
+
+ @property
+ def subscription_id(self):
+ return self.azure_auth.subscription_id
+
+ @property
+ def storage_client(self):
+ self.log('Getting storage client...')
+ if not self._storage_client:
+ self._storage_client = self.get_mgmt_svc_client(StorageManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-07-01')
+ return self._storage_client
+
+ @property
+ def storage_models(self):
+ return StorageManagementClient.models("2018-07-01")
+
+ @property
+ def network_client(self):
+ self.log('Getting network client')
+ if not self._network_client:
+ self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2019-06-01')
+ return self._network_client
+
+ @property
+ def network_models(self):
+ self.log("Getting network models...")
+ return NetworkManagementClient.models("2018-08-01")
+
+ @property
+ def rm_client(self):
+ self.log('Getting resource manager client')
+ if not self._resource_client:
+ self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2017-05-10')
+ return self._resource_client
+
+ @property
+ def rm_models(self):
+ self.log("Getting resource manager models")
+ return ResourceManagementClient.models("2017-05-10")
+
+ @property
+ def compute_client(self):
+ self.log('Getting compute client')
+ if not self._compute_client:
+ self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2019-07-01')
+ return self._compute_client
+
+ @property
+ def compute_models(self):
+ self.log("Getting compute models")
+ return ComputeManagementClient.models("2019-07-01")
+
+ @property
+ def dns_client(self):
+ self.log('Getting dns client')
+ if not self._dns_client:
+ self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-05-01')
+ return self._dns_client
+
+ @property
+ def dns_models(self):
+ self.log("Getting dns models...")
+ return DnsManagementClient.models('2018-05-01')
+
+ @property
+ def web_client(self):
+ self.log('Getting web client')
+ if not self._web_client:
+ self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-02-01')
+ return self._web_client
+
+ @property
+ def containerservice_client(self):
+ self.log('Getting container service client')
+ if not self._containerservice_client:
+ self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2017-07-01')
+ return self._containerservice_client
+
+ @property
+ def managedcluster_models(self):
+ self.log("Getting container service models")
+ return ContainerServiceClient.models('2018-03-31')
+
+ @property
+ def managedcluster_client(self):
+ self.log('Getting container service client')
+ if not self._managedcluster_client:
+ self._managedcluster_client = self.get_mgmt_svc_client(ContainerServiceClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-03-31')
+ return self._managedcluster_client
+
+ @property
+ def sql_client(self):
+ self.log('Getting SQL client')
+ if not self._sql_client:
+ self._sql_client = self.get_mgmt_svc_client(SqlManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._sql_client
+
+ @property
+ def postgresql_client(self):
+ self.log('Getting PostgreSQL client')
+ if not self._postgresql_client:
+ self._postgresql_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._postgresql_client
+
+ @property
+ def mysql_client(self):
+ self.log('Getting MySQL client')
+ if not self._mysql_client:
+ self._mysql_client = self.get_mgmt_svc_client(MySQLManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._mysql_client
+
+ @property
+ def mariadb_client(self):
+ self.log('Getting MariaDB client')
+ if not self._mariadb_client:
+ self._mariadb_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._mariadb_client
+
+ @property
+ def containerregistry_client(self):
+ self.log('Getting container registry mgmt client')
+ if not self._containerregistry_client:
+ self._containerregistry_client = self.get_mgmt_svc_client(ContainerRegistryManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2017-10-01')
+
+ return self._containerregistry_client
+
+ @property
+ def containerinstance_client(self):
+ self.log('Getting container instance mgmt client')
+ if not self._containerinstance_client:
+ self._containerinstance_client = self.get_mgmt_svc_client(ContainerInstanceManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-06-01')
+
+ return self._containerinstance_client
+
+ @property
+ def marketplace_client(self):
+ self.log('Getting marketplace agreement client')
+ if not self._marketplace_client:
+ self._marketplace_client = self.get_mgmt_svc_client(MarketplaceOrderingAgreements,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._marketplace_client
+
+ @property
+ def traffic_manager_management_client(self):
+ self.log('Getting traffic manager client')
+ if not self._traffic_manager_management_client:
+ self._traffic_manager_management_client = self.get_mgmt_svc_client(TrafficManagerManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._traffic_manager_management_client
+
+ @property
+ def monitor_client(self):
+ self.log('Getting monitor client')
+ if not self._monitor_client:
+ self._monitor_client = self.get_mgmt_svc_client(MonitorManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._monitor_client
+
+ @property
+ def log_analytics_client(self):
+ self.log('Getting log analytics client')
+ if not self._log_analytics_client:
+ self._log_analytics_client = self.get_mgmt_svc_client(LogAnalyticsManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._log_analytics_client
+
+ @property
+ def log_analytics_models(self):
+ self.log('Getting log analytics models')
+ return LogAnalyticsModels
+
+ @property
+ def servicebus_client(self):
+ self.log('Getting servicebus client')
+ if not self._servicebus_client:
+ self._servicebus_client = self.get_mgmt_svc_client(ServiceBusManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._servicebus_client
+
+ @property
+ def servicebus_models(self):
+ return ServicebusModel
+
+ @property
+ def automation_client(self):
+ self.log('Getting automation client')
+ if not self._automation_client:
+ self._automation_client = self.get_mgmt_svc_client(AutomationClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._automation_client
+
+ @property
+ def automation_models(self):
+ return AutomationModel
+
+ @property
+ def IoThub_client(self):
+ self.log('Getting iothub client')
+ if not self._IoThub_client:
+ self._IoThub_client = self.get_mgmt_svc_client(IotHubClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._IoThub_client
+
+ @property
+ def IoThub_models(self):
+ return IoTHubModels
+
+ @property
+ def lock_client(self):
+ self.log('Getting lock client')
+ if not self._lock_client:
+ self._lock_client = self.get_mgmt_svc_client(ManagementLockClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2016-09-01')
+ return self._lock_client
+
+ @property
+ def lock_models(self):
+ self.log("Getting lock models")
+ return ManagementLockClient.models('2016-09-01')
+
+
+class AzureSASAuthentication(Authentication):
+ """Simple SAS Authentication.
+ An implementation of Authentication in
+ https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/authentication.py
+
+ :param str token: SAS token
+ """
+ def __init__(self, token):
+ self.token = token
+
+ def signed_session(self):
+ session = super(AzureSASAuthentication, self).signed_session()
+ session.headers['Authorization'] = self.token
+ return session
+
+
+class AzureRMAuthException(Exception):
+ pass
+
+
+class AzureRMAuth(object):
+ def __init__(self, auth_source='auto', profile=None, subscription_id=None, client_id=None, secret=None,
+ tenant=None, ad_user=None, password=None, cloud_environment='AzureCloud', cert_validation_mode='validate',
+ api_profile='latest', adfs_authority_url=None, fail_impl=None, **kwargs):
+
+ if fail_impl:
+ self._fail_impl = fail_impl
+ else:
+ self._fail_impl = self._default_fail_impl
+
+ self._cloud_environment = None
+ self._adfs_authority_url = None
+
+ # authenticate
+ self.credentials = self._get_credentials(
+ dict(auth_source=auth_source, profile=profile, subscription_id=subscription_id, client_id=client_id, secret=secret,
+ tenant=tenant, ad_user=ad_user, password=password, cloud_environment=cloud_environment,
+ cert_validation_mode=cert_validation_mode, api_profile=api_profile, adfs_authority_url=adfs_authority_url))
+
+ if not self.credentials:
+ if HAS_AZURE_CLI_CORE:
+ self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+ "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).")
+ else:
+ self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+ "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).")
+
+ # cert validation mode precedence: module-arg, credential profile, env, "validate"
+ self._cert_validation_mode = cert_validation_mode or self.credentials.get('cert_validation_mode') or \
+ os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'
+
+ if self._cert_validation_mode not in ['validate', 'ignore']:
+ self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))
+
+ # if cloud_environment specified, look up/build Cloud object
+ raw_cloud_env = self.credentials.get('cloud_environment')
+ if self.credentials.get('credentials') is not None and raw_cloud_env is not None:
+ self._cloud_environment = raw_cloud_env
+ elif not raw_cloud_env:
+ self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
+ else:
+ # try to look up "well-known" values via the name attribute on azure_cloud members
+ all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
+ matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
+ if len(matched_clouds) == 1:
+ self._cloud_environment = matched_clouds[0]
+ elif len(matched_clouds) > 1:
+ self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
+ else:
+ if not urlparse.urlparse(raw_cloud_env).scheme:
+ self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
+ try:
+ self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
+ except Exception as e:
+ self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message), exception=traceback.format_exc())
+
+ if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:
+ self.fail("Credentials did not include a subscription_id value.")
+ self.log("setting subscription_id")
+ self.subscription_id = self.credentials['subscription_id']
+
+ # get authentication authority
+ # for adfs, user could pass in authority or not.
+ # for others, use default authority from cloud environment
+ if self.credentials.get('adfs_authority_url') is None:
+ self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
+ else:
+ self._adfs_authority_url = self.credentials.get('adfs_authority_url')
+
+ # get resource from cloud environment
+ self._resource = self._cloud_environment.endpoints.active_directory_resource_id
+
+ if self.credentials.get('credentials') is not None:
+ # AzureCLI credentials
+ self.azure_credentials = self.credentials['credentials']
+ elif self.credentials.get('client_id') is not None and \
+ self.credentials.get('secret') is not None and \
+ self.credentials.get('tenant') is not None:
+ self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
+ secret=self.credentials['secret'],
+ tenant=self.credentials['tenant'],
+ cloud_environment=self._cloud_environment,
+ verify=self._cert_validation_mode == 'validate')
+
+ elif self.credentials.get('ad_user') is not None and \
+ self.credentials.get('password') is not None and \
+ self.credentials.get('client_id') is not None and \
+ self.credentials.get('tenant') is not None:
+
+ self.azure_credentials = self.acquire_token_with_username_password(
+ self._adfs_authority_url,
+ self._resource,
+ self.credentials['ad_user'],
+ self.credentials['password'],
+ self.credentials['client_id'],
+ self.credentials['tenant'])
+
+ elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
+ tenant = self.credentials.get('tenant')
+ if not tenant:
+ tenant = 'common' # SDK default
+
+ self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
+ self.credentials['password'],
+ tenant=tenant,
+ cloud_environment=self._cloud_environment,
+ verify=self._cert_validation_mode == 'validate')
+ else:
+ self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
+ "Credentials must include client_id, secret and tenant or ad_user and password, or "
+ "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
+ "be logged in using AzureCLI.")
+
+ def fail(self, msg, exception=None, **kwargs):
+ self._fail_impl(msg)
+
+ def _default_fail_impl(self, msg, exception=None, **kwargs):
+ raise AzureRMAuthException(msg)
+
+ def _get_profile(self, profile="default"):
+ path = expanduser("~/.azure/credentials")
+ try:
+ config = configparser.ConfigParser()
+ config.read(path)
+ except Exception as exc:
+ self.fail("Failed to access {0}. Check that the file exists and you have read "
+ "access. {1}".format(path, str(exc)))
+ credentials = dict()
+ for key in AZURE_CREDENTIAL_ENV_MAPPING:
+ try:
+ credentials[key] = config.get(profile, key, raw=True)
+ except Exception:
+ pass
+
+ if credentials.get('subscription_id'):
+ return credentials
+
+ return None
+
+ def _get_msi_credentials(self, subscription_id_param=None, **kwargs):
+ client_id = kwargs.get('client_id', None)
+ credentials = MSIAuthentication(client_id=client_id)
+ subscription_id = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
+ if not subscription_id:
+ try:
+ # use the first subscription of the MSI
+ subscription_client = SubscriptionClient(credentials)
+ subscription = next(subscription_client.subscriptions.list())
+ subscription_id = str(subscription.subscription_id)
+ except Exception as exc:
+ self.fail("Failed to get MSI token: {0}. "
+ "Please check whether your machine enabled MSI or grant access to any subscription.".format(str(exc)))
+ return {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id
+ }
+
+ def _get_azure_cli_credentials(self):
+ credentials, subscription_id = get_azure_cli_credentials()
+ cloud_environment = get_cli_active_cloud()
+
+ cli_credentials = {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id,
+ 'cloud_environment': cloud_environment
+ }
+ return cli_credentials
+
+ def _get_env_credentials(self):
+ env_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ env_credentials[attribute] = os.environ.get(env_variable, None)
+
+ if env_credentials['profile']:
+ credentials = self._get_profile(env_credentials['profile'])
+ return credentials
+
+ if env_credentials.get('subscription_id') is not None:
+ return env_credentials
+
+ return None
+
+ # TODO: use explicit kwargs instead of intermediate dict
+ def _get_credentials(self, params):
+ # Get authentication credentials.
+ self.log('Getting credentials')
+
+ arg_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ arg_credentials[attribute] = params.get(attribute, None)
+
+ auth_source = params.get('auth_source', None)
+ if not auth_source:
+ auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')
+
+ if auth_source == 'msi':
+            self.log('Retrieving credentials from MSI')
+ return self._get_msi_credentials(arg_credentials['subscription_id'], client_id=params.get('client_id', None))
+
+ if auth_source == 'cli':
+ if not HAS_AZURE_CLI_CORE:
+ self.fail(msg=missing_required_lib('azure-cli', reason='for `cli` auth_source'),
+ exception=HAS_AZURE_CLI_CORE_EXC)
+ try:
+ self.log('Retrieving credentials from Azure CLI profile')
+ cli_credentials = self._get_azure_cli_credentials()
+ return cli_credentials
+ except CLIError as err:
+ self.fail("Azure CLI profile cannot be loaded - {0}".format(err))
+
+ if auth_source == 'env':
+ self.log('Retrieving credentials from environment')
+ env_credentials = self._get_env_credentials()
+ return env_credentials
+
+ if auth_source == 'credential_file':
+ self.log("Retrieving credentials from credential file")
+ profile = params.get('profile') or 'default'
+ default_credentials = self._get_profile(profile)
+ return default_credentials
+
+ # auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials
+ # try module params
+ if arg_credentials['profile'] is not None:
+ self.log('Retrieving credentials with profile parameter.')
+ credentials = self._get_profile(arg_credentials['profile'])
+ return credentials
+
+ if arg_credentials['subscription_id']:
+ self.log('Received credentials from parameters.')
+ return arg_credentials
+
+ # try environment
+ env_credentials = self._get_env_credentials()
+ if env_credentials:
+ self.log('Received credentials from env.')
+ return env_credentials
+
+        # try default profile from ~/.azure/credentials
+ default_credentials = self._get_profile()
+ if default_credentials:
+ self.log('Retrieved default profile credentials from ~/.azure/credentials.')
+ return default_credentials
+
+ try:
+ if HAS_AZURE_CLI_CORE:
+ self.log('Retrieving credentials from AzureCLI profile')
+ cli_credentials = self._get_azure_cli_credentials()
+ return cli_credentials
+ except CLIError as ce:
+ self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
+
+ return None
+
+ def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
+ authority_uri = authority
+
+ if tenant is not None:
+ authority_uri = authority + '/' + tenant
+
+ context = AuthenticationContext(authority_uri)
+ token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
+
+ return AADTokenCredentials(token_response)
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # Use only during module development
+ # if self.debug:
+ # log_file = open('azure_rm.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, indent=4, sort_keys=True))
+ # else:
+ # log_file.write(msg + u'\n')
diff --git a/test/support/integration/plugins/module_utils/azure_rm_common_rest.py b/test/support/integration/plugins/module_utils/azure_rm_common_rest.py
new file mode 100644
index 00000000..4fd7eaa3
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/azure_rm_common_rest.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_configuration import AzureConfiguration
+ from msrest.service_client import ServiceClient
+ from msrest.pipeline import ClientRawResponse
+ from msrest.polling import LROPoller
+ from msrestazure.polling.arm_polling import ARMPolling
+ import uuid
+ import json
+except ImportError:
+ # This is handled in azure_rm_common
+ AzureConfiguration = object
+
+ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
+
+
+class GenericRestClientConfiguration(AzureConfiguration):
+
+ def __init__(self, credentials, subscription_id, base_url=None):
+
+ if credentials is None:
+ raise ValueError("Parameter 'credentials' must not be None.")
+ if subscription_id is None:
+ raise ValueError("Parameter 'subscription_id' must not be None.")
+ if not base_url:
+ base_url = 'https://management.azure.com'
+
+ super(GenericRestClientConfiguration, self).__init__(base_url)
+
+ self.add_user_agent(ANSIBLE_USER_AGENT)
+
+ self.credentials = credentials
+ self.subscription_id = subscription_id
+
+
+class GenericRestClient(object):
+
+ def __init__(self, credentials, subscription_id, base_url=None):
+ self.config = GenericRestClientConfiguration(credentials, subscription_id, base_url)
+ self._client = ServiceClient(self.config.credentials, self.config)
+ self.models = None
+
+ def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval):
+ # Construct and send request
+ operation_config = {}
+
+ request = None
+
+ if header_parameters is None:
+ header_parameters = {}
+
+ header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
+
+ if method == 'GET':
+ request = self._client.get(url, query_parameters)
+ elif method == 'PUT':
+ request = self._client.put(url, query_parameters)
+ elif method == 'POST':
+ request = self._client.post(url, query_parameters)
+ elif method == 'HEAD':
+ request = self._client.head(url, query_parameters)
+ elif method == 'PATCH':
+ request = self._client.patch(url, query_parameters)
+ elif method == 'DELETE':
+ request = self._client.delete(url, query_parameters)
+        elif method == 'MERGE':
+            request = self._client.merge(url, query_parameters)
+        else:
+            raise ValueError('Unsupported HTTP method: {0}'.format(method))
+
+ response = self._client.send(request, header_parameters, body, **operation_config)
+
+ if response.status_code not in expected_status_codes:
+ exp = CloudError(response)
+ exp.request_id = response.headers.get('x-ms-request-id')
+ raise exp
+ elif response.status_code == 202 and polling_timeout > 0:
+ def get_long_running_output(response):
+ return response
+ poller = LROPoller(self._client,
+ ClientRawResponse(None, response),
+ get_long_running_output,
+ ARMPolling(polling_interval, **operation_config))
+ response = self.get_poller_result(poller, polling_timeout)
+
+ return response
+
+ def get_poller_result(self, poller, timeout):
+ try:
+ poller.wait(timeout=timeout)
+ return poller.result()
+        except Exception:
+            # surface any polling failure to the caller unchanged
+            raise
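+
+
+# Illustrative usage (a sketch, not part of the class): issuing a raw GET
+# through the generic client. The URL and api-version value are assumptions.
+#
+#   client = GenericRestClient(credentials, subscription_id)
+#   response = client.query(url='/subscriptions/{0}/resourceGroups'.format(subscription_id),
+#                           method='GET',
+#                           query_parameters={'api-version': '2019-10-01'},
+#                           header_parameters=None,
+#                           body=None,
+#                           expected_status_codes=[200],
+#                           polling_timeout=0,
+#                           polling_interval=0)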
diff --git a/test/support/integration/plugins/module_utils/cloud.py b/test/support/integration/plugins/module_utils/cloud.py
new file mode 100644
index 00000000..0d29071f
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/cloud.py
@@ -0,0 +1,217 @@
+#
+# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+ * CloudRetry
+ - The base class to be used by other cloud providers, in order to
+ provide a backoff/retry decorator based on status codes.
+
+ - Example using the AWSRetry class which inherits from CloudRetry.
+
+        @AWSRetry.exponential_backoff(retries=10, delay=3)
+        def get_ec2_security_group_ids_from_names():
+            ...
+
+        @AWSRetry.jittered_backoff()
+        def get_ec2_security_group_ids_from_names():
+            ...
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+ """ Customizable exponential backoff strategy.
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Initial (base) delay.
+ backoff (float): base of the exponent to use for exponential
+ backoff.
+        max_delay (int): Optional. If provided, each delay generated is capped
+            at this amount. Defaults to 60 seconds.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for an exponential backoff strategy.
+ Usage:
+ >>> backoff = _exponential_backoff()
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ sleep = delay * backoff ** retry
+ yield sleep if max_delay is None else min(sleep, max_delay)
+ return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+ """ Implements the "Full Jitter" backoff strategy described here
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Approximate number of seconds to sleep for the first
+ retry.
+ max_delay (int): The maximum number of seconds to sleep for any retry.
+ _random (random.Random or None): Makes this generator testable by
+            allowing developers to explicitly pass in a seeded Random.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for a full jitter backoff strategy.
+ Usage:
+ >>> backoff = _full_jitter_backoff(retries=5)
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [3, 6, 5, 23, 38]
+ >>> list(backoff())
+ [2, 1, 6, 6, 31]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+ return backoff_gen
+
+
+class CloudRetry(object):
+ """ CloudRetry can be used by any cloud provider, in order to implement a
+ backoff algorithm/retry effect based on Status Code from Exceptions.
+ """
+    # This is the base class of the exception to retry on,
+    # e.g. for AWS: botocore.exceptions.ClientError
+ base_class = None
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """ Return the status code from the exception object
+ Args:
+ error (object): The exception itself.
+ """
+ pass
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ """ Return True if the Response Code to retry on was found.
+ Args:
+ response_code (str): This is the Response Code that is being matched against.
+ """
+ pass
+
+ @classmethod
+ def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+ """ Retry calling the Cloud decorated function using the provided
+ backoff strategy.
+ Args:
+ backoff_strategy (callable): Callable that returns a generator. The
+ generator should yield sleep times for each retry of the decorated
+ function.
+ """
+ def deco(f):
+ @wraps(f)
+ def retry_func(*args, **kwargs):
+ for delay in backoff_strategy():
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, cls.base_class):
+ response_code = cls.status_code_from_exception(e)
+ if cls.found(response_code, catch_extra_error_codes):
+ msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ time.sleep(delay)
+                            else:
+                                # status code is not retryable; re-raise the original exception
+                                raise e
+                        else:
+                            # exception is not an instance of base_class; re-raise it
+                            raise e
+ return f(*args, **kwargs)
+
+ return retry_func # true decorator
+
+ return deco
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+                default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_exponential_backoff(
+ retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using a jittered backoff
+ strategy. More on this strategy here:
+
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int): Initial delay between retries in seconds
+ default=3
+ max_delay (int): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_full_jitter_backoff(
+ retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Compatibility for the original implementation of CloudRetry.backoff that
+ did not provide configurable backoff strategies. Developers should use
+ CloudRetry.exponential_backoff instead.
+
+ Kwargs:
+ tries (int): Number of times to try (not retry) before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=1.1
+ """
+ return cls.exponential_backoff(
+ retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
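+
+
+# A minimal sketch of a provider subclass of CloudRetry; it is not part of
+# this file, and the error shape is an assumption modeled on botocore's
+# ClientError.
+#
+#   class AWSRetry(CloudRetry):
+#       base_class = botocore.exceptions.ClientError
+#
+#       @staticmethod
+#       def status_code_from_exception(error):
+#           return error.response['Error']['Code']
+#
+#       @staticmethod
+#       def found(response_code, catch_extra_error_codes=None):
+#           retry_on = ['RequestLimitExceeded', 'Throttling']
+#           if catch_extra_error_codes:
+#               retry_on += catch_extra_error_codes
+#           return response_code in retry_on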
diff --git a/test/support/integration/plugins/module_utils/compat/__init__.py b/test/support/integration/plugins/module_utils/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/compat/__init__.py
diff --git a/test/support/integration/plugins/module_utils/compat/ipaddress.py b/test/support/integration/plugins/module_utils/compat/ipaddress.py
new file mode 100644
index 00000000..c46ad72a
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/compat/ipaddress.py
@@ -0,0 +1,2476 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file, and this file only, is based on
+# Lib/ipaddress.py of cpython
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
+# are retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+# Copyright 2007 Google Inc.
+# Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+from __future__ import unicode_literals
+
+
+import itertools
+import struct
+
+
+# The following makes it easier for us to script updates of the bundled code and is not part of
+# upstream
+_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"}
+
+__version__ = '1.0.22'
+
+# Compatibility functions
+_compat_int_types = (int,)
+try:
+ _compat_int_types = (int, long)
+except NameError:
+ pass
+try:
+ _compat_str = unicode
+except NameError:
+ _compat_str = str
+ assert bytes != str
+if b'\0'[0] == 0: # Python 3 semantics
+ def _compat_bytes_to_byte_vals(byt):
+ return byt
+else:
+ def _compat_bytes_to_byte_vals(byt):
+ return [struct.unpack(b'!B', b)[0] for b in byt]
+try:
+ _compat_int_from_byte_vals = int.from_bytes
+except AttributeError:
+ def _compat_int_from_byte_vals(bytvals, endianess):
+ assert endianess == 'big'
+ res = 0
+ for bv in bytvals:
+ assert isinstance(bv, _compat_int_types)
+ res = (res << 8) + bv
+ return res
+
+
+def _compat_to_bytes(intval, length, endianess):
+ assert isinstance(intval, _compat_int_types)
+ assert endianess == 'big'
+ if length == 4:
+ if intval < 0 or intval >= 2 ** 32:
+ raise struct.error("integer out of range for 'I' format code")
+ return struct.pack(b'!I', intval)
+ elif length == 16:
+ if intval < 0 or intval >= 2 ** 128:
+ raise struct.error("integer out of range for 'QQ' format code")
+ return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
+ else:
+ raise NotImplementedError()
+
+
+if hasattr(int, 'bit_length'):
+    # Not int.bit_length, since that won't work in 2.7 where long exists
+ def _compat_bit_length(i):
+ return i.bit_length()
+else:
+ def _compat_bit_length(i):
+ for res in itertools.count():
+ if i >> res == 0:
+ return res
+
+
+def _compat_range(start, end, step=1):
+ assert step > 0
+ i = start
+ while i < end:
+ yield i
+ i += step
+
+
+class _TotalOrderingMixin(object):
+ __slots__ = ()
+
+ # Helper that derives the other comparison operations from
+ # __lt__ and __eq__
+ # We avoid functools.total_ordering because it doesn't handle
+ # NotImplemented correctly yet (http://bugs.python.org/issue10042)
+ def __eq__(self, other):
+ raise NotImplementedError
+
+ def __ne__(self, other):
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not equal
+
+ def __lt__(self, other):
+ raise NotImplementedError
+
+ def __le__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented or not less:
+ return self.__eq__(other)
+ return less
+
+ def __gt__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not (less or equal)
+
+ def __ge__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ return not less
+
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+ """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+ """A Value Error related to the netmask."""
+
+
+def ip_address(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Address or IPv6Address object.
+
+ Raises:
+ ValueError: if the *address* passed isn't either a v4 or a v6
+ address
+
+ """
+ try:
+ return IPv4Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ '%r does not appear to be an IPv4 or IPv6 address. '
+ 'Did you pass in a bytes (str in Python 2) instead of'
+ ' a unicode object?' % address)
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
+ address)
+
+
+def ip_network(address, strict=True):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP network. Either IPv4 or
+ IPv6 networks may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Network or IPv6Network object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address. Or if the network has host bits set.
+
+ """
+ try:
+ return IPv4Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ '%r does not appear to be an IPv4 or IPv6 network. '
+ 'Did you pass in a bytes (str in Python 2) instead of'
+ ' a unicode object?' % address)
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
+ address)
+
+
+def ip_interface(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Interface or IPv6Interface object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address.
+
+ Notes:
+ The IPv?Interface classes describe an Address on a particular
+ Network, so they're basically a combination of both the Address
+ and Network classes.
+
+ """
+ try:
+ return IPv4Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
+ address)
+
+
+def v4_int_to_packed(address):
+ """Represent an address as 4 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv4 IP address.
+
+ Returns:
+ The integer address packed as 4 bytes in network (big-endian) order.
+
+ Raises:
+ ValueError: If the integer is negative or too large to be an
+ IPv4 IP address.
+
+ """
+ try:
+ return _compat_to_bytes(address, 4, 'big')
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv4")
+
+
+def v6_int_to_packed(address):
+ """Represent an address as 16 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv6 IP address.
+
+ Returns:
+ The integer address packed as 16 bytes in network (big-endian) order.
+
+ """
+ try:
+ return _compat_to_bytes(address, 16, 'big')
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv6")
+
+
+def _split_optional_netmask(address):
+ """Helper to split the netmask and raise AddressValueError if needed"""
+ addr = _compat_str(address).split('/')
+ if len(addr) > 2:
+ raise AddressValueError("Only one '/' permitted in %r" % address)
+ return addr
+
+
+def _find_address_range(addresses):
+ """Find a sequence of sorted deduplicated IPv#Address.
+
+ Args:
+ addresses: a list of IPv#Address objects.
+
+ Yields:
+ A tuple containing the first and last IP addresses in the sequence.
+
+ """
+ it = iter(addresses)
+ first = last = next(it) # pylint: disable=stop-iteration-return
+ for ip in it:
+ if ip._ip != last._ip + 1:
+ yield first, last
+ first = ip
+ last = ip
+ yield first, last
+
+
+def _count_righthand_zero_bits(number, bits):
+ """Count the number of zero bits on the right hand side.
+
+ Args:
+ number: an integer.
+ bits: maximum number of bits to count.
+
+ Returns:
+ The number of zero bits on the right hand side of the number.
+
+ """
+ if number == 0:
+ return bits
+ return min(bits, _compat_bit_length(~number & (number - 1)))
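+
+
+# Illustrative: the function counts trailing zero bits, capped at `bits`,
+# e.g. _count_righthand_zero_bits(0b101000, 8) -> 3.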
+
+
+def summarize_address_range(first, last):
+ """Summarize a network range given the first and last IP addresses.
+
+ Example:
+ >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
+ ... IPv4Address('192.0.2.130')))
+ ... #doctest: +NORMALIZE_WHITESPACE
+ [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
+ IPv4Network('192.0.2.130/32')]
+
+ Args:
+ first: the first IPv4Address or IPv6Address in the range.
+ last: the last IPv4Address or IPv6Address in the range.
+
+ Returns:
+ An iterator of the summarized IPv(4|6) network objects.
+
+ Raise:
+ TypeError:
+ If the first and last objects are not IP addresses.
+ If the first and last objects are not the same version.
+ ValueError:
+ If the last object is not greater than the first.
+ If the version of the first address is not 4 or 6.
+
+ """
+ if (not (isinstance(first, _BaseAddress) and
+ isinstance(last, _BaseAddress))):
+ raise TypeError('first and last must be IP addresses, not networks')
+ if first.version != last.version:
+ raise TypeError("%s and %s are not of the same version" % (
+ first, last))
+ if first > last:
+ raise ValueError('last IP address must be greater than first')
+
+ if first.version == 4:
+ ip = IPv4Network
+ elif first.version == 6:
+ ip = IPv6Network
+ else:
+ raise ValueError('unknown IP version')
+
+ ip_bits = first._max_prefixlen
+ first_int = first._ip
+ last_int = last._ip
+ while first_int <= last_int:
+ nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
+ _compat_bit_length(last_int - first_int + 1) - 1)
+ net = ip((first_int, ip_bits - nbits))
+ yield net
+ first_int += 1 << nbits
+ if first_int - 1 == ip._ALL_ONES:
+ break
+
+
+def _collapse_addresses_internal(addresses):
+ """Loops through the addresses, collapsing concurrent netblocks.
+
+ Example:
+
+ ip1 = IPv4Network('192.0.2.0/26')
+ ip2 = IPv4Network('192.0.2.64/26')
+ ip3 = IPv4Network('192.0.2.128/26')
+ ip4 = IPv4Network('192.0.2.192/26')
+
+ _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ This shouldn't be called directly; it is called via
+ collapse_addresses([]).
+
+ Args:
+ addresses: A list of IPv4Network's or IPv6Network's
+
+ Returns:
+ A list of IPv4Network's or IPv6Network's depending on what we were
+ passed.
+
+ """
+ # First merge
+ to_merge = list(addresses)
+ subnets = {}
+ while to_merge:
+ net = to_merge.pop()
+ supernet = net.supernet()
+ existing = subnets.get(supernet)
+ if existing is None:
+ subnets[supernet] = net
+ elif existing != net:
+ # Merge consecutive subnets
+ del subnets[supernet]
+ to_merge.append(supernet)
+ # Then iterate over resulting networks, skipping subsumed subnets
+ last = None
+ for net in sorted(subnets.values()):
+ if last is not None:
+ # Since they are sorted,
+ # last.network_address <= net.network_address is a given.
+ if last.broadcast_address >= net.broadcast_address:
+ continue
+ yield net
+ last = net
+
+
+def collapse_addresses(addresses):
+ """Collapse a list of IP objects.
+
+ Example:
+ collapse_addresses([IPv4Network('192.0.2.0/25'),
+ IPv4Network('192.0.2.128/25')]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ Args:
+ addresses: An iterator of IPv4Network or IPv6Network objects.
+
+ Returns:
+ An iterator of the collapsed IPv(4|6)Network objects.
+
+ Raises:
+ TypeError: If passed a list of mixed version objects.
+
+ """
+ addrs = []
+ ips = []
+ nets = []
+
+ # split IP addresses and networks
+ for ip in addresses:
+ if isinstance(ip, _BaseAddress):
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, ips[-1]))
+ ips.append(ip)
+ elif ip._prefixlen == ip._max_prefixlen:
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, ips[-1]))
+ try:
+ ips.append(ip.ip)
+ except AttributeError:
+ ips.append(ip.network_address)
+ else:
+ if nets and nets[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, nets[-1]))
+ nets.append(ip)
+
+ # sort and dedup
+ ips = sorted(set(ips))
+
+ # find consecutive address ranges in the sorted sequence and summarize them
+ if ips:
+ for first, last in _find_address_range(ips):
+ addrs.extend(summarize_address_range(first, last))
+
+ return _collapse_addresses_internal(addrs + nets)
+
+
+def get_mixed_type_key(obj):
+ """Return a key suitable for sorting between networks and addresses.
+
+ Address and Network objects are not sortable by default; they're
+ fundamentally different so the expression
+
+ IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
+
+ doesn't make any sense. There are some times however, where you may wish
+ to have ipaddress sort these for you anyway. If you need to do this, you
+ can use this function as the key= argument to sorted().
+
+ Args:
+ obj: either a Network or Address object.
+ Returns:
+ appropriate key.
+
+ """
+ if isinstance(obj, _BaseNetwork):
+ return obj._get_networks_key()
+ elif isinstance(obj, _BaseAddress):
+ return obj._get_address_key()
+ return NotImplemented
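+
+
+# Illustrative (a sketch): sorting a mixed list of networks and addresses
+# with the key above.
+#
+#   sorted([IPv4Network('192.0.2.0/28'), IPv4Address('192.0.2.1')],
+#          key=get_mixed_type_key)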
+
+
+class _IPAddressBase(_TotalOrderingMixin):
+
+ """The mother class."""
+
+ __slots__ = ()
+
+ @property
+ def exploded(self):
+ """Return the longhand version of the IP address as a string."""
+ return self._explode_shorthand_ip_string()
+
+ @property
+ def compressed(self):
+ """Return the shorthand version of the IP address as a string."""
+ return _compat_str(self)
+
+ @property
+ def reverse_pointer(self):
+ """The name of the reverse DNS pointer for the IP address, e.g.:
+ >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+ '1.0.0.127.in-addr.arpa'
+ >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+ '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+ """
+ return self._reverse_pointer()
+
+ @property
+ def version(self):
+ msg = '%200s has no version specified' % (type(self),)
+ raise NotImplementedError(msg)
+
+ def _check_int_address(self, address):
+ if address < 0:
+ msg = "%d (< 0) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._version))
+ if address > self._ALL_ONES:
+ msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._max_prefixlen,
+ self._version))
+
+ def _check_packed_address(self, address, expected_len):
+ address_len = len(address)
+ if address_len != expected_len:
+ msg = (
+ '%r (len %d != %d) is not permitted as an IPv%d address. '
+ 'Did you pass in a bytes (str in Python 2) instead of'
+ ' a unicode object?')
+ raise AddressValueError(msg % (address, address_len,
+ expected_len, self._version))
+
+ @classmethod
+ def _ip_int_from_prefix(cls, prefixlen):
+ """Turn the prefix length into a bitwise netmask
+
+ Args:
+ prefixlen: An integer, the prefix length.
+
+ Returns:
+ An integer.
+
+ """
+ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
+
+ @classmethod
+ def _prefix_from_ip_int(cls, ip_int):
+ """Return prefix length from the bitwise netmask.
+
+ Args:
+ ip_int: An integer, the netmask in expanded bitwise format
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ ValueError: If the input intermingles zeroes & ones
+ """
+ trailing_zeroes = _count_righthand_zero_bits(ip_int,
+ cls._max_prefixlen)
+ prefixlen = cls._max_prefixlen - trailing_zeroes
+ leading_ones = ip_int >> trailing_zeroes
+ all_ones = (1 << prefixlen) - 1
+ if leading_ones != all_ones:
+ byteslen = cls._max_prefixlen // 8
+ details = _compat_to_bytes(ip_int, byteslen, 'big')
+ msg = 'Netmask pattern %r mixes zeroes & ones'
+ raise ValueError(msg % details)
+ return prefixlen
+
+ @classmethod
+ def _report_invalid_netmask(cls, netmask_str):
+ msg = '%r is not a valid netmask' % netmask_str
+ raise NetmaskValueError(msg)
+
+ @classmethod
+ def _prefix_from_prefix_string(cls, prefixlen_str):
+ """Return prefix length from a numeric string
+
+ Args:
+ prefixlen_str: The string to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask
+ """
+ # int allows a leading +/- as well as surrounding whitespace,
+ # so we ensure that isn't the case
+ if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
+ cls._report_invalid_netmask(prefixlen_str)
+ try:
+ prefixlen = int(prefixlen_str)
+ except ValueError:
+ cls._report_invalid_netmask(prefixlen_str)
+ if not (0 <= prefixlen <= cls._max_prefixlen):
+ cls._report_invalid_netmask(prefixlen_str)
+ return prefixlen
+
+ @classmethod
+ def _prefix_from_ip_string(cls, ip_str):
+ """Turn a netmask/hostmask string into a prefix length
+
+ Args:
+ ip_str: The netmask/hostmask to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask/hostmask
+ """
+ # Parse the netmask/hostmask like an IP address.
+ try:
+ ip_int = cls._ip_int_from_string(ip_str)
+ except AddressValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
+ # Note that the two ambiguous cases (all-ones and all-zeroes) are
+ # treated as netmasks.
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ pass
+
+ # Invert the bits, and try matching a /0+1+/ hostmask instead.
+ ip_int ^= cls._ALL_ONES
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ cls._report_invalid_netmask(ip_str)
+
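+    # Illustrative results of the conversion above, for IPv4 (assumed inputs):
+    #   '255.255.255.0' (a netmask)  -> 24
+    #   '0.0.0.255'     (a hostmask) -> 24
+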
+ def __reduce__(self):
+ return self.__class__, (_compat_str(self),)
+
+
+class _BaseAddress(_IPAddressBase):
+
+ """A generic IP object.
+
+ This IP class contains the version independent methods which are
+ used by single IP addresses.
+ """
+
+ __slots__ = ()
+
+ def __int__(self):
+ return self._ip
+
+ def __eq__(self, other):
+ try:
+ return (self._ip == other._ip and
+ self._version == other._version)
+ except AttributeError:
+ return NotImplemented
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseAddress):
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ self, other))
+ if self._ip != other._ip:
+ return self._ip < other._ip
+ return False
+
+ # Shorthand for Integer addition and subtraction. This is not
+ # meant to ever support addition/subtraction of addresses.
+ def __add__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) + other)
+
+ def __sub__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) - other)
+
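+ # For illustration, the integer shorthand this enables:
+ #   IPv4Address('192.0.2.1') + 5 -> IPv4Address('192.0.2.6')
+ #   IPv4Address('192.0.2.6') - 5 -> IPv4Address('192.0.2.1')
+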
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return _compat_str(self._string_from_ip_int(self._ip))
+
+ def __hash__(self):
+ return hash(hex(int(self._ip)))
+
+ def _get_address_key(self):
+ return (self._version, self)
+
+ def __reduce__(self):
+ return self.__class__, (self._ip,)
+
+
+class _BaseNetwork(_IPAddressBase):
+
+ """A generic IP network object.
+
+ This IP class contains the version independent methods which are
+ used by networks.
+
+ """
+ def __init__(self, address):
+ self._cache = {}
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return '%s/%d' % (self.network_address, self.prefixlen)
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the network
+ or broadcast addresses.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast):
+ yield self._address_class(x)
+
+ def __iter__(self):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network, broadcast + 1):
+ yield self._address_class(x)
+
+ def __getitem__(self, n):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ if n >= 0:
+ if network + n > broadcast:
+ raise IndexError('address out of range')
+ return self._address_class(network + n)
+ else:
+ n += 1
+ if broadcast + n < network:
+ raise IndexError('address out of range')
+ return self._address_class(broadcast + n)
+
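+ # For illustration, indexing counts forward from the network address
+ # and, for negative indices, backward from the broadcast address:
+ #   IPv4Network('192.0.2.0/28')[0]  -> IPv4Address('192.0.2.0')
+ #   IPv4Network('192.0.2.0/28')[-1] -> IPv4Address('192.0.2.15')
+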
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ self, other))
+ if self.network_address != other.network_address:
+ return self.network_address < other.network_address
+ if self.netmask != other.netmask:
+ return self.netmask < other.netmask
+ return False
+
+ def __eq__(self, other):
+ try:
+ return (self._version == other._version and
+ self.network_address == other.network_address and
+ int(self.netmask) == int(other.netmask))
+ except AttributeError:
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(int(self.network_address) ^ int(self.netmask))
+
+ def __contains__(self, other):
+ # always false if one is v4 and the other is v6.
+ if self._version != other._version:
+ return False
+ # dealing with another network.
+ if isinstance(other, _BaseNetwork):
+ return False
+ # dealing with another address
+ else:
+ # address
+ return (int(self.network_address) <= int(other._ip) <=
+ int(self.broadcast_address))
+
+ def overlaps(self, other):
+ """Tell if self is partly contained in other."""
+ return self.network_address in other or (
+ self.broadcast_address in other or (
+ other.network_address in self or (
+ other.broadcast_address in self)))
+
+ @property
+ def broadcast_address(self):
+ x = self._cache.get('broadcast_address')
+ if x is None:
+ x = self._address_class(int(self.network_address) |
+ int(self.hostmask))
+ self._cache['broadcast_address'] = x
+ return x
+
+ @property
+ def hostmask(self):
+ x = self._cache.get('hostmask')
+ if x is None:
+ x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
+ self._cache['hostmask'] = x
+ return x
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%d' % (self.network_address, self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self.network_address, self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self.network_address, self.hostmask)
+
+ @property
+ def num_addresses(self):
+ """Number of hosts in the current subnet."""
+ return int(self.broadcast_address) - int(self.network_address) + 1
+
+ @property
+ def _address_class(self):
+ # Returning bare address objects (rather than interfaces) allows for
+ # more consistent behaviour across the network address, broadcast
+ # address and individual host addresses.
+ msg = '%200s has no associated address class' % (type(self),)
+ raise NotImplementedError(msg)
+
+ @property
+ def prefixlen(self):
+ return self._prefixlen
+
+ def address_exclude(self, other):
+ """Remove an address from a larger block.
+
+ For example:
+
+ addr1 = ip_network('192.0.2.0/28')
+ addr2 = ip_network('192.0.2.1/32')
+ list(addr1.address_exclude(addr2)) =
+ [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
+ IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
+
+ or IPv6:
+
+ addr1 = ip_network('2001:db8::1/32')
+ addr2 = ip_network('2001:db8::1/128')
+ list(addr1.address_exclude(addr2)) =
+ [ip_network('2001:db8::1/128'),
+ ip_network('2001:db8::2/127'),
+ ip_network('2001:db8::4/126'),
+ ip_network('2001:db8::8/125'),
+ ...
+ ip_network('2001:db8:8000::/33')]
+
+ Args:
+ other: An IPv4Network or IPv6Network object of the same type.
+
+ Returns:
+ An iterator of the IPv(4|6)Network objects which is self
+ minus other.
+
+ Raises:
+ TypeError: If self and other are of differing address
+ versions, or if other is not a network object.
+ ValueError: If other is not completely contained by self.
+
+ """
+ if self._version != other._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ self, other))
+
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError("%s is not a network object" % other)
+
+ if not other.subnet_of(self):
+ raise ValueError('%s not contained in %s' % (other, self))
+ if other == self:
+ return
+
+ # Make sure we're comparing the network of other.
+ other = other.__class__('%s/%s' % (other.network_address,
+ other.prefixlen))
+
+ s1, s2 = self.subnets()
+ while s1 != other and s2 != other:
+ if other.subnet_of(s1):
+ yield s2
+ s1, s2 = s1.subnets()
+ elif other.subnet_of(s2):
+ yield s1
+ s1, s2 = s2.subnets()
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (s1, s2, other))
+ if s1 == other:
+ yield s2
+ elif s2 == other:
+ yield s1
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (s1, s2, other))
+
+ def compare_networks(self, other):
+ """Compare two IP objects.
+
+ This is only concerned about the comparison of the integer
+ representation of the network addresses. This means that the
+ host bits aren't considered at all in this method. If you want
+ to compare host bits, you can easily enough do a
+ 'HostA._ip < HostB._ip'
+
+ Args:
+ other: An IP object.
+
+ Returns:
+ If the IP versions of self and other are the same, returns:
+
+ -1 if self < other:
+ eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
+ IPv6Network('2001:db8::1000/124') <
+ IPv6Network('2001:db8::2000/124')
+ 0 if self == other
+ eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
+ IPv6Network('2001:db8::1000/124') ==
+ IPv6Network('2001:db8::1000/124')
+ 1 if self > other
+ eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
+ IPv6Network('2001:db8::2000/124') >
+ IPv6Network('2001:db8::1000/124')
+
+ Raises:
+ TypeError if the IP versions are different.
+
+ """
+ # does this need to raise a ValueError?
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ # self._version == other._version below here:
+ if self.network_address < other.network_address:
+ return -1
+ if self.network_address > other.network_address:
+ return 1
+ # self.network_address == other.network_address below here:
+ if self.netmask < other.netmask:
+ return -1
+ if self.netmask > other.netmask:
+ return 1
+ return 0
+
+ def _get_networks_key(self):
+ """Network-only key function.
+
+ Returns an object that identifies this address' network and
+ netmask. This function is a suitable "key" argument for sorted()
+ and list.sort().
+
+ """
+ return (self._version, self.network_address, self.netmask)
+
+ def subnets(self, prefixlen_diff=1, new_prefix=None):
+ """The subnets which join to make the current subnet.
+
+ In the case that self contains only one IP
+ (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
+ for IPv6), yield only ourself.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length
+ should be increased by. This should not be set if
+ new_prefix is also set.
+ new_prefix: The desired new prefix length. This must be a
+ larger number (smaller prefix) than the existing prefix.
+ This should not be set if prefixlen_diff is also set.
+
+ Returns:
+ An iterator of IPv(4|6) objects.
+
+ Raises:
+ ValueError: The prefixlen_diff is too small or too large.
+ OR
+ prefixlen_diff and new_prefix are both set or new_prefix
+ is a smaller number than the current prefix (smaller
+ number means a larger network)
+
+ """
+ if self._prefixlen == self._max_prefixlen:
+ yield self
+ return
+
+ if new_prefix is not None:
+ if new_prefix < self._prefixlen:
+ raise ValueError('new prefix must be longer')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = new_prefix - self._prefixlen
+
+ if prefixlen_diff < 0:
+ raise ValueError('prefix length diff must be > 0')
+ new_prefixlen = self._prefixlen + prefixlen_diff
+
+ if new_prefixlen > self._max_prefixlen:
+ raise ValueError(
+ 'prefix length diff %d is invalid for netblock %s' % (
+ new_prefixlen, self))
+
+ start = int(self.network_address)
+ end = int(self.broadcast_address) + 1
+ step = (int(self.hostmask) + 1) >> prefixlen_diff
+ for new_addr in _compat_range(start, end, step):
+ current = self.__class__((new_addr, new_prefixlen))
+ yield current
+
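+ # For illustration, the default prefixlen_diff of 1 halves the network,
+ # while new_prefix asks for the target length directly:
+ #   list(IPv4Network('192.0.2.0/24').subnets())
+ #   -> [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]
+ #   list(IPv4Network('192.0.2.0/24').subnets(new_prefix=26)) -> four /26s
+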
+ def supernet(self, prefixlen_diff=1, new_prefix=None):
+ """The supernet containing the current network.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length of
+ the network should be decreased by. For example, given a
+ /24 network and a prefixlen_diff of 3, a supernet with a
+ /21 netmask is returned.
+
+ Returns:
+ An IPv(4|6) network object.
+
+ Raises:
+ ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
+ a negative prefix length.
+ OR
+ If prefixlen_diff and new_prefix are both set or new_prefix is a
+ larger number than the current prefix (larger number means a
+ smaller network)
+
+ """
+ if self._prefixlen == 0:
+ return self
+
+ if new_prefix is not None:
+ if new_prefix > self._prefixlen:
+ raise ValueError('new prefix must be shorter')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = self._prefixlen - new_prefix
+
+ new_prefixlen = self.prefixlen - prefixlen_diff
+ if new_prefixlen < 0:
+ raise ValueError(
+ 'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
+ (self.prefixlen, prefixlen_diff))
+ return self.__class__((
+ int(self.network_address) & (int(self.netmask) << prefixlen_diff),
+ new_prefixlen))
+
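+ # For illustration, widening by the default single bit:
+ #   IPv4Network('192.0.2.0/24').supernet() -> IPv4Network('192.0.2.0/23')
+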
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return (self.network_address.is_multicast and
+ self.broadcast_address.is_multicast)
+
+ @staticmethod
+ def _is_subnet_of(a, b):
+ try:
+ # Always false if one is v4 and the other is v6.
+ if a._version != b._version:
+ raise TypeError("%s and %s are not of the same version" % (a, b))
+ return (b.network_address <= a.network_address and
+ b.broadcast_address >= a.broadcast_address)
+ except AttributeError:
+ raise TypeError("Unable to test subnet containment "
+ "between %s and %s" % (a, b))
+
+ def subnet_of(self, other):
+ """Return True if this network is a subnet of other."""
+ return self._is_subnet_of(self, other)
+
+ def supernet_of(self, other):
+ """Return True if this network is a supernet of other."""
+ return self._is_subnet_of(other, self)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return (self.network_address.is_reserved and
+ self.broadcast_address.is_reserved)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return (self.network_address.is_link_local and
+ self.broadcast_address.is_link_local)
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return (self.network_address.is_private and
+ self.broadcast_address.is_private)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return (self.network_address.is_unspecified and
+ self.broadcast_address.is_unspecified)
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return (self.network_address.is_loopback and
+ self.broadcast_address.is_loopback)
+
+
+class _BaseV4(object):
+
+ """Base IPv4 object.
+
+ The following methods are used by IPv4 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 4
+ # Equivalent to 255.255.255.255 or 32 bits of 1's.
+ _ALL_ONES = (2 ** IPV4LENGTH) - 1
+ _DECIMAL_DIGITS = frozenset('0123456789')
+
+ # the valid octets for host and netmasks. only useful for IPv4.
+ _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
+
+ _max_prefixlen = IPV4LENGTH
+ # There are only a handful of valid v4 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ def _explode_shorthand_ip_string(self):
+ return _compat_str(self)
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ - a string representing the prefix netmask (e.g. "255.255.255.0")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ try:
+ # Check for a netmask in prefix length form
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ except NetmaskValueError:
+ # Check for a netmask or hostmask in dotted-quad form.
+ # This may raise NetmaskValueError.
+ prefixlen = cls._prefix_from_ip_string(arg)
+ netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn the given IP string into an integer for comparison.
+
+ Args:
+ ip_str: A string, the IP ip_str.
+
+ Returns:
+ The IP ip_str as an integer.
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError('Address cannot be empty')
+
+ octets = ip_str.split('.')
+ if len(octets) != 4:
+ raise AddressValueError("Expected 4 octets in %r" % ip_str)
+
+ try:
+ return _compat_int_from_byte_vals(
+ map(cls._parse_octet, octets), 'big')
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_octet(cls, octet_str):
+ """Convert a decimal octet into an integer.
+
+ Args:
+ octet_str: A string, the number to parse.
+
+ Returns:
+ The octet as an integer.
+
+ Raises:
+ ValueError: if the octet isn't strictly a decimal from [0..255].
+
+ """
+ if not octet_str:
+ raise ValueError("Empty octet not permitted")
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._DECIMAL_DIGITS.issuperset(octet_str):
+ msg = "Only decimal digits permitted in %r"
+ raise ValueError(msg % octet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(octet_str) > 3:
+ msg = "At most 3 characters permitted in %r"
+ raise ValueError(msg % octet_str)
+ # Convert to integer (we know digits are legal)
+ octet_int = int(octet_str, 10)
+ # Any octets that look like they *might* be written in octal,
+ # and which don't look exactly the same in both octal and
+ # decimal are rejected as ambiguous
+ if octet_int > 7 and octet_str[0] == '0':
+ msg = "Ambiguous (octal/decimal) value in %r not permitted"
+ raise ValueError(msg % octet_str)
+ if octet_int > 255:
+ raise ValueError("Octet %d (> 255) not permitted" % octet_int)
+ return octet_int
+
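+ # For illustration: '0' and '255' parse cleanly, '010' is rejected as
+ # ambiguous octal/decimal, and '256' is rejected as out of range.
+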
+ @classmethod
+ def _string_from_ip_int(cls, ip_int):
+ """Turns a 32-bit integer into dotted decimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ The IP address as a string in dotted decimal notation.
+
+ """
+ return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
+ if isinstance(b, bytes)
+ else b)
+ for b in _compat_to_bytes(ip_int, 4, 'big'))
+
+ def _is_hostmask(self, ip_str):
+ """Test if the IP string is a hostmask (rather than a netmask).
+
+ Args:
+ ip_str: A string, the potential hostmask.
+
+ Returns:
+ A boolean, True if the IP string is a hostmask.
+
+ """
+ bits = ip_str.split('.')
+ try:
+ parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
+ except ValueError:
+ return False
+ if len(parts) != len(bits):
+ return False
+ if parts[0] < parts[-1]:
+ return True
+ return False
+
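+ # For illustration: '0.0.0.255' is recognized as a hostmask (octets
+ # ascend), while '255.255.255.0' is not (it is a netmask).
+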
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv4 address.
+
+ This implements the method described in RFC1035 3.5.
+
+ """
+ reverse_octets = _compat_str(self).split('.')[::-1]
+ return '.'.join(reverse_octets) + '.in-addr.arpa'
+
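+ # For illustration:
+ #   IPv4Address('127.0.0.1')._reverse_pointer() -> '1.0.0.127.in-addr.arpa'
+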
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv4Address(_BaseV4, _BaseAddress):
+
+ """Represent and manipulate single IPv4 Addresses."""
+
+ __slots__ = ('_ip', '__weakref__')
+
+ def __init__(self, address):
+ """
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv4Address('192.0.2.1') == IPv4Address(3221225985).
+ or, more generally
+ IPv4Address(int(IPv4Address('192.0.2.1'))) ==
+ IPv4Address('192.0.2.1')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv4 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 4)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, 'big')
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if '/' in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v4_int_to_packed(self._ip)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within the
+ reserved IPv4 Network range.
+
+ """
+ return self in self._constants._reserved_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ return (
+ self not in self._constants._public_network and
+ not self.is_private)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is multicast.
+ See RFC 3171 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 5735 3.
+
+ """
+ return self == self._constants._unspecified_address
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback per RFC 3330.
+
+ """
+ return self in self._constants._loopback_network
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is link-local per RFC 3927.
+
+ """
+ return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv4Address.__init__(self, address)
+ self.network = IPv4Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+
+ if isinstance(address, tuple):
+ IPv4Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+
+ self.network = IPv4Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv4Address.__init__(self, addr[0])
+
+ self.network = IPv4Network(address, strict=False)
+ self._prefixlen = self.network._prefixlen
+
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return '%s/%d' % (self._string_from_ip_int(self._ip),
+ self.network.prefixlen)
+
+ def __eq__(self, other):
+ address_equal = IPv4Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv4Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (self.network < other.network or
+ self.network == other.network and address_less)
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv4Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+ """This class represents and manipulates 32-bit IPv4 network + addresses..
+
+ Attributes: [examples for IPv4Network('192.0.2.0/27')]
+ .network_address: IPv4Address('192.0.2.0')
+ .hostmask: IPv4Address('0.0.0.31')
+ .broadcast_address: IPv4Address('192.0.2.31')
+ .netmask: IPv4Address('255.255.255.224')
+ .prefixlen: 27
+
+ """
+ # Class to use when creating address objects
+ _address_class = IPv4Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv4 network object.
+
+ Args:
+ address: A string or integer representing the IP [& network].
+ '192.0.2.0/24'
+ '192.0.2.0/255.255.255.0'
+ '192.0.2.0/0.0.0.255'
+ are all functionally the same in IPv4. Similarly,
+ '192.0.2.1'
+ '192.0.2.1/255.255.255.255'
+ '192.0.2.1/32'
+ are also functionally equivalent. That is to say, failing to
+ provide a subnetmask will create an object with a mask of /32.
+
+ If the mask (portion after the / in the argument) is given in
+ dotted quad form, it is treated as a netmask if it starts with a
+ non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+ starts with a zero field (e.g. 0.255.255.255 == /8), with the
+ single exception of an all-zero mask which is treated as a
+ netmask == /0. If no mask is given, a default of /32 is used.
+
+ Additionally, an integer can be passed, so
+ IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+ or, more generally
+ IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+ IPv4Interface('192.0.2.1')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv4 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv4 address.
+ ValueError: If strict is True and a network address is not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Constructing from a packed address or integer
+ if isinstance(address, (_compat_int_types, bytes)):
+ self.network_address = IPv4Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen)
+ # fixme: address/network test here.
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ # We weren't given an address[1]
+ arg = self._max_prefixlen
+ self.network_address = IPv4Address(address[0])
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError('%s has host bits set' % self)
+ else:
+ self.network_address = IPv4Address(packed &
+ int(self.netmask))
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+ self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
+ self.network_address):
+ raise ValueError('%s has host bits set' % self)
+ self.network_address = IPv4Address(int(self.network_address) &
+ int(self.netmask))
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return (not (self.network_address in IPv4Network('100.64.0.0/10') and
+ self.broadcast_address in IPv4Network('100.64.0.0/10')) and
+ not self.is_private)
+
+
+class _IPv4Constants(object):
+
+ _linklocal_network = IPv4Network('169.254.0.0/16')
+
+ _loopback_network = IPv4Network('127.0.0.0/8')
+
+ _multicast_network = IPv4Network('224.0.0.0/4')
+
+ _public_network = IPv4Network('100.64.0.0/10')
+
+ _private_networks = [
+ IPv4Network('0.0.0.0/8'),
+ IPv4Network('10.0.0.0/8'),
+ IPv4Network('127.0.0.0/8'),
+ IPv4Network('169.254.0.0/16'),
+ IPv4Network('172.16.0.0/12'),
+ IPv4Network('192.0.0.0/29'),
+ IPv4Network('192.0.0.170/31'),
+ IPv4Network('192.0.2.0/24'),
+ IPv4Network('192.168.0.0/16'),
+ IPv4Network('198.18.0.0/15'),
+ IPv4Network('198.51.100.0/24'),
+ IPv4Network('203.0.113.0/24'),
+ IPv4Network('240.0.0.0/4'),
+ IPv4Network('255.255.255.255/32'),
+ ]
+
+ _reserved_network = IPv4Network('240.0.0.0/4')
+
+ _unspecified_address = IPv4Address('0.0.0.0')
+
+
+IPv4Address._constants = _IPv4Constants
+
+
+class _BaseV6(object):
+
+ """Base IPv6 object.
+
+ The following methods are used by IPv6 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 6
+ _ALL_ONES = (2 ** IPV6LENGTH) - 1
+ _HEXTET_COUNT = 8
+ _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+ _max_prefixlen = IPV6LENGTH
+
+ # There are only a handful of valid v6 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "64")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn an IPv6 ip_str into an integer.
+
+ Args:
+ ip_str: A string, the IPv6 ip_str.
+
+ Returns:
+ An int, the IPv6 address
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv6 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError('Address cannot be empty')
+
+ parts = ip_str.split(':')
+
+ # An IPv6 address needs at least 2 colons (3 parts).
+ _min_parts = 3
+ if len(parts) < _min_parts:
+ msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
+ raise AddressValueError(msg)
+
+ # If the address has an IPv4-style suffix, convert it to hexadecimal.
+ if '.' in parts[-1]:
+ try:
+ ipv4_int = IPv4Address(parts.pop())._ip
+ except AddressValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+ parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
+ parts.append('%x' % (ipv4_int & 0xFFFF))
+
+ # An IPv6 address can't have more than 8 colons (9 parts).
+ # The extra colon comes from using the "::" notation for a single
+ # leading or trailing zero part.
+ _max_parts = cls._HEXTET_COUNT + 1
+ if len(parts) > _max_parts:
+ msg = "At most %d colons permitted in %r" % (
+ _max_parts - 1, ip_str)
+ raise AddressValueError(msg)
+
+ # Disregarding the endpoints, find '::' with nothing in between.
+ # This indicates that a run of zeroes has been skipped.
+ skip_index = None
+ for i in _compat_range(1, len(parts) - 1):
+ if not parts[i]:
+ if skip_index is not None:
+ # Can't have more than one '::'
+ msg = "At most one '::' permitted in %r" % ip_str
+ raise AddressValueError(msg)
+ skip_index = i
+
+ # parts_hi is the number of parts to copy from above/before the '::'
+ # parts_lo is the number of parts to copy from below/after the '::'
+ if skip_index is not None:
+ # If we found a '::', then check if it also covers the endpoints.
+ parts_hi = skip_index
+ parts_lo = len(parts) - skip_index - 1
+ if not parts[0]:
+ parts_hi -= 1
+ if parts_hi:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ parts_lo -= 1
+ if parts_lo:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
+ if parts_skipped < 1:
+ msg = "Expected at most %d other parts with '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
+ else:
+ # Otherwise, allocate the entire address to parts_hi. The
+ # endpoints could still be empty, but _parse_hextet() will check
+ # for that.
+ if len(parts) != cls._HEXTET_COUNT:
+ msg = "Exactly %d parts expected without '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
+ if not parts[0]:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_hi = len(parts)
+ parts_lo = 0
+ parts_skipped = 0
+
+ try:
+ # Now, parse the hextets into a 128-bit integer.
+ ip_int = 0
+ for i in range(parts_hi):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ ip_int <<= 16 * parts_skipped
+ for i in range(-parts_lo, 0):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ return ip_int
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_hextet(cls, hextet_str):
+ """Convert an IPv6 hextet string into an integer.
+
+ Args:
+ hextet_str: A string, the number to parse.
+
+ Returns:
+ The hextet as an integer.
+
+ Raises:
+ ValueError: if the input isn't strictly a hex number from
+ [0..FFFF].
+
+ """
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._HEX_DIGITS.issuperset(hextet_str):
+ raise ValueError("Only hex digits permitted in %r" % hextet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(hextet_str) > 4:
+ msg = "At most 4 characters permitted in %r"
+ raise ValueError(msg % hextet_str)
+ # Length check means we can skip checking the integer value
+ return int(hextet_str, 16)
+
+ @classmethod
+ def _compress_hextets(cls, hextets):
+ """Compresses a list of hextets.
+
+ Compresses a list of strings, replacing the longest continuous
+ sequence of "0" in the list with "" and adding empty strings at
+ the beginning or at the end of the string such that subsequently
+ calling ":".join(hextets) will produce the compressed version of
+ the IPv6 address.
+
+ Args:
+ hextets: A list of strings, the hextets to compress.
+
+ Returns:
+ A list of strings.
+
+ """
+ best_doublecolon_start = -1
+ best_doublecolon_len = 0
+ doublecolon_start = -1
+ doublecolon_len = 0
+ for index, hextet in enumerate(hextets):
+ if hextet == '0':
+ doublecolon_len += 1
+ if doublecolon_start == -1:
+ # Start of a sequence of zeros.
+ doublecolon_start = index
+ if doublecolon_len > best_doublecolon_len:
+ # This is the longest sequence of zeros so far.
+ best_doublecolon_len = doublecolon_len
+ best_doublecolon_start = doublecolon_start
+ else:
+ doublecolon_len = 0
+ doublecolon_start = -1
+
+ if best_doublecolon_len > 1:
+ best_doublecolon_end = (best_doublecolon_start +
+ best_doublecolon_len)
+ # For zeros at the end of the address.
+ if best_doublecolon_end == len(hextets):
+ hextets += ['']
+ hextets[best_doublecolon_start:best_doublecolon_end] = ['']
+ # For zeros at the beginning of the address.
+ if best_doublecolon_start == 0:
+ hextets = [''] + hextets
+
+ return hextets
+
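+ # For illustration, the longest run of zero hextets wins the '::' slot:
+ #   _compress_hextets(['2001', 'db8', '0', '0', '0', '0', '0', '1'])
+ #   -> ['2001', 'db8', '', '1'], which joins to '2001:db8::1'
+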
+ @classmethod
+ def _string_from_ip_int(cls, ip_int=None):
+ """Turns a 128-bit integer into hexadecimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ A string, the hexadecimal representation of the address.
+
+ Raises:
+ ValueError: The address is bigger than 128 bits of all ones.
+
+ """
+ if ip_int is None:
+ ip_int = int(cls._ip)
+
+ if ip_int > cls._ALL_ONES:
+ raise ValueError('IPv6 address is too large')
+
+ hex_str = '%032x' % ip_int
+ hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
+
+ hextets = cls._compress_hextets(hextets)
+ return ':'.join(hextets)
+
+ def _explode_shorthand_ip_string(self):
+ """Expand a shortened IPv6 address.
+
+ Returns:
+ A string, the expanded IPv6 address.
+
+ """
+ if isinstance(self, IPv6Network):
+ ip_str = _compat_str(self.network_address)
+ elif isinstance(self, IPv6Interface):
+ ip_str = _compat_str(self.ip)
+ else:
+ ip_str = _compat_str(self)
+
+ ip_int = self._ip_int_from_string(ip_str)
+ hex_str = '%032x' % ip_int
+ parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
+ if isinstance(self, (_BaseNetwork, IPv6Interface)):
+ return '%s/%d' % (':'.join(parts), self._prefixlen)
+ return ':'.join(parts)
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv6 address.
+
+ This implements the method described in RFC3596 2.5.
+
+ """
+ reverse_chars = self.exploded[::-1].replace(':', '')
+ return '.'.join(reverse_chars) + '.ip6.arpa'
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv6Address(_BaseV6, _BaseAddress):
+
+ """Represent and manipulate single IPv6 Addresses."""
+
+ __slots__ = ('_ip', '__weakref__')
+
+ def __init__(self, address):
+ """Instantiate a new IPv6 address object.
+
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv6Address('2001:db8::') ==
+ IPv6Address(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Address(int(IPv6Address('2001:db8::'))) ==
+ IPv6Address('2001:db8::')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 16)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, 'big')
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if '/' in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v6_int_to_packed(self._ip)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return any(self in x for x in self._constants._reserved_networks)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return self in self._constants._linklocal_network
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return self in self._constants._sitelocal_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return self._ip == 0
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return self._ip == 1
+
+ @property
+ def ipv4_mapped(self):
+ """Return the IPv4 mapped address.
+
+ Returns:
+ If the IPv6 address is a v4 mapped address, return the
+ IPv4 mapped address. Return None otherwise.
+
+ """
+ if (self._ip >> 32) != 0xFFFF:
+ return None
+ return IPv4Address(self._ip & 0xFFFFFFFF)
+
+ @property
+ def teredo(self):
+ """Tuple of embedded teredo IPs.
+
+ Returns:
+ Tuple of the (server, client) IPs or None if the address
+ doesn't appear to be a teredo address (doesn't start with
+ 2001::/32)
+
+ """
+ if (self._ip >> 96) != 0x20010000:
+ return None
+ return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+ IPv4Address(~self._ip & 0xFFFFFFFF))
+
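+ # For illustration (note the client address is stored bit-inverted):
+ #   IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
+ #   -> (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))
+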
+ @property
+ def sixtofour(self):
+ """Return the IPv4 6to4 embedded address.
+
+ Returns:
+ The IPv4 6to4-embedded address if present or None if the
+ address doesn't appear to contain a 6to4 embedded address.
+
+ """
+ if (self._ip >> 112) != 0x2002:
+ return None
+ return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
+
+
+class IPv6Interface(IPv6Address):
+
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv6Address.__init__(self, address)
+ self.network = IPv6Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+ if isinstance(address, tuple):
+ IPv6Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv6Address.__init__(self, addr[0])
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self._prefixlen = self.network._prefixlen
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return '%s/%d' % (self._string_from_ip_int(self._ip),
+ self.network.prefixlen)
+
+ def __eq__(self, other):
+ address_equal = IPv6Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv6Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (self.network < other.network or
+ self.network == other.network and address_less)
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv6Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.hostmask)
+
+ @property
+ def is_unspecified(self):
+ return self._ip == 0 and self.network.is_unspecified
+
+ @property
+ def is_loopback(self):
+ return self._ip == 1 and self.network.is_loopback
+
+
+class IPv6Network(_BaseV6, _BaseNetwork):
+
+ """This class represents and manipulates 128-bit IPv6 networks.
+
+ Attributes: [examples for IPv6Network('2001:db8::1000/124')]
+ .network_address: IPv6Address('2001:db8::1000')
+ .hostmask: IPv6Address('::f')
+ .broadcast_address: IPv6Address('2001:db8::100f')
+ .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
+ .prefixlen: 124
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv6Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv6 Network object.
+
+ Args:
+ address: A string or integer representing the IPv6 network or the
+ IP and prefix/netmask.
+ '2001:db8::/128'
+ '2001:db8:0000:0000:0000:0000:0000:0000/128'
+ '2001:db8::'
+ are all functionally the same in IPv6. That is to say,
+ failing to provide a subnetmask will create an object with
+ a mask of /128.
+
+ Additionally, an integer can be passed, so
+ IPv6Network('2001:db8::') ==
+ IPv6Network(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Network(int(IPv6Network('2001:db8::'))) ==
+ IPv6Network('2001:db8::')
+
+ strict: A boolean. If true, ensure that we have been passed
+ a true network address, e.g. 2001:db8::1000/124 and not an
+ IP address on a network, e.g. 2001:db8::1/124.
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv6 address.
+ ValueError: If strict was True and a network address was not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Efficient constructor from integer or packed address
+ if isinstance(address, (bytes, _compat_int_types)):
+ self.network_address = IPv6Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen)
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ self.network_address = IPv6Address(address[0])
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError('%s has host bits set' % self)
+ else:
+ self.network_address = IPv6Address(packed &
+ int(self.netmask))
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+
+ self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
+ self.network_address):
+ raise ValueError('%s has host bits set' % self)
+ self.network_address = IPv6Address(int(self.network_address) &
+ int(self.netmask))
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the
+ Subnet-Router anycast address.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast + 1):
+ yield self._address_class(x)
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return (self.network_address.is_site_local and
+ self.broadcast_address.is_site_local)
+
+
+class _IPv6Constants(object):
+
+ _linklocal_network = IPv6Network('fe80::/10')
+
+ _multicast_network = IPv6Network('ff00::/8')
+
+ _private_networks = [
+ IPv6Network('::1/128'),
+ IPv6Network('::/128'),
+ IPv6Network('::ffff:0:0/96'),
+ IPv6Network('100::/64'),
+ IPv6Network('2001::/23'),
+ IPv6Network('2001:2::/48'),
+ IPv6Network('2001:db8::/32'),
+ IPv6Network('2001:10::/28'),
+ IPv6Network('fc00::/7'),
+ IPv6Network('fe80::/10'),
+ ]
+
+ _reserved_networks = [
+ IPv6Network('::/8'), IPv6Network('100::/8'),
+ IPv6Network('200::/7'), IPv6Network('400::/6'),
+ IPv6Network('800::/5'), IPv6Network('1000::/4'),
+ IPv6Network('4000::/3'), IPv6Network('6000::/3'),
+ IPv6Network('8000::/3'), IPv6Network('A000::/3'),
+ IPv6Network('C000::/3'), IPv6Network('E000::/4'),
+ IPv6Network('F000::/5'), IPv6Network('F800::/6'),
+ IPv6Network('FE00::/9'),
+ ]
+
+ _sitelocal_network = IPv6Network('fec0::/10')
+
+
+IPv6Address._constants = _IPv6Constants
diff --git a/test/support/integration/plugins/module_utils/crypto.py b/test/support/integration/plugins/module_utils/crypto.py
new file mode 100644
index 00000000..e67eeff1
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/crypto.py
@@ -0,0 +1,2125 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ----------------------------------------------------------------------
+# A clearly marked portion of this file is licensed under the BSD license
+# Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
+# Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
+# For more details, search for the function _obj2txt().
+# ---------------------------------------------------------------------
+# A clearly marked portion of this file is extracted from a project that
+# is licensed under the Apache License 2.0
+# Copyright (c) the OpenSSL contributors
+# For more details, search for the function _OID_MAP.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import sys
+from distutils.version import LooseVersion
+
+try:
+ import OpenSSL
+ from OpenSSL import crypto
+except ImportError:
+ # An error will be raised in the calling class to let the end
+ # user know that OpenSSL couldn't be found.
+ pass
+
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend as cryptography_backend
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives import serialization
+ import ipaddress
+
+ # Older versions of cryptography (< 2.1) do not have __hash__ functions for
+ # general name objects (DNSName, IPAddress, ...), while providing overloaded
+ # equality and string representation operations. This makes it impossible to
+ # use them in hash-based data structures such as set or dict. Since we are
+ # actually doing that in openssl_certificate, and potentially in other code,
+ # we need to monkey-patch __hash__ for these classes to make sure our code
+ # works fine.
+ if LooseVersion(cryptography.__version__) < LooseVersion('2.1'):
+ # A very simple hash function which relies on the object's repr()
+ # being implemented. This is the case since at least
+ # cryptography 1.0, see
+ # https://github.com/pyca/cryptography/commit/7a9abce4bff36c05d26d8d2680303a6f64a0e84f
+ def simple_hash(self):
+ return hash(repr(self))
+
+ # The hash functions for the following types were added for cryptography 2.1:
+ # https://github.com/pyca/cryptography/commit/fbfc36da2a4769045f2373b004ddf0aff906cf38
+ x509.DNSName.__hash__ = simple_hash
+ x509.DirectoryName.__hash__ = simple_hash
+ x509.GeneralName.__hash__ = simple_hash
+ x509.IPAddress.__hash__ = simple_hash
+ x509.OtherName.__hash__ = simple_hash
+ x509.RegisteredID.__hash__ = simple_hash
+
+ if LooseVersion(cryptography.__version__) < LooseVersion('1.2'):
+ # The hash functions for the following types were added for cryptography 1.2:
+ # https://github.com/pyca/cryptography/commit/b642deed88a8696e5f01ce6855ccf89985fc35d0
+ # https://github.com/pyca/cryptography/commit/d1b5681f6db2bde7a14625538bd7907b08dfb486
+ x509.RFC822Name.__hash__ = simple_hash
+ x509.UniformResourceIdentifier.__hash__ = simple_hash
+
+ # Test whether we have support for X25519, X448, Ed25519 and/or Ed448
+ try:
+ import cryptography.hazmat.primitives.asymmetric.x25519
+ CRYPTOGRAPHY_HAS_X25519 = True
+ try:
+ cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.private_bytes
+ CRYPTOGRAPHY_HAS_X25519_FULL = True
+ except AttributeError:
+ CRYPTOGRAPHY_HAS_X25519_FULL = False
+ except ImportError:
+ CRYPTOGRAPHY_HAS_X25519 = False
+ CRYPTOGRAPHY_HAS_X25519_FULL = False
+ try:
+ import cryptography.hazmat.primitives.asymmetric.x448
+ CRYPTOGRAPHY_HAS_X448 = True
+ except ImportError:
+ CRYPTOGRAPHY_HAS_X448 = False
+ try:
+ import cryptography.hazmat.primitives.asymmetric.ed25519
+ CRYPTOGRAPHY_HAS_ED25519 = True
+ except ImportError:
+ CRYPTOGRAPHY_HAS_ED25519 = False
+ try:
+ import cryptography.hazmat.primitives.asymmetric.ed448
+ CRYPTOGRAPHY_HAS_ED448 = True
+ except ImportError:
+ CRYPTOGRAPHY_HAS_ED448 = False
+
+ HAS_CRYPTOGRAPHY = True
+except ImportError:
+ # Error handled in the calling module.
+ CRYPTOGRAPHY_HAS_X25519 = False
+ CRYPTOGRAPHY_HAS_X25519_FULL = False
+ CRYPTOGRAPHY_HAS_X448 = False
+ CRYPTOGRAPHY_HAS_ED25519 = False
+ CRYPTOGRAPHY_HAS_ED448 = False
+ HAS_CRYPTOGRAPHY = False
+
+
+import abc
+import base64
+import binascii
+import datetime
+import errno
+import hashlib
+import os
+import re
+import tempfile
+
+from ansible.module_utils import six
+from ansible.module_utils._text import to_native, to_bytes, to_text
+
+
+class OpenSSLObjectError(Exception):
+ pass
+
+
+class OpenSSLBadPassphraseError(OpenSSLObjectError):
+ pass
+
+
+def get_fingerprint_of_bytes(source):
+ """Generate the fingerprint of the given bytes."""
+
+ fingerprint = {}
+
+ try:
+ algorithms = hashlib.algorithms
+ except AttributeError:
+ try:
+ algorithms = hashlib.algorithms_guaranteed
+ except AttributeError:
+ return None
+
+ for algo in algorithms:
+ f = getattr(hashlib, algo)
+ try:
+ h = f(source)
+ except ValueError:
+ # This can happen for hash algorithms not supported in FIPS mode
+ # (https://github.com/ansible/ansible/issues/67213)
+ continue
+ try:
+ # Certain hash functions have a hexdigest() which expects a length parameter
+ pubkey_digest = h.hexdigest()
+ except TypeError:
+ pubkey_digest = h.hexdigest(32)
+ fingerprint[algo] = ':'.join(pubkey_digest[i:i + 2] for i in range(0, len(pubkey_digest), 2))
+
+ return fingerprint
+
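+# A sketch of the returned mapping; the exact key set depends on the local
+# hashlib build:
+#   get_fingerprint_of_bytes(b'...')
+#   -> {'md5': 'xx:xx:...', 'sha1': 'xx:xx:...', 'sha256': 'xx:xx:...', ...}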
+
+def get_fingerprint(path, passphrase=None, content=None, backend='pyopenssl'):
+ """Generate the fingerprint of the public key. """
+
+ privatekey = load_privatekey(path, passphrase=passphrase, content=content, check_passphrase=False, backend=backend)
+
+ if backend == 'pyopenssl':
+ try:
+ publickey = crypto.dump_publickey(crypto.FILETYPE_ASN1, privatekey)
+ except AttributeError:
+ # If PyOpenSSL < 16.0 crypto.dump_publickey() will fail.
+ try:
+ bio = crypto._new_mem_buf()
+ rc = crypto._lib.i2d_PUBKEY_bio(bio, privatekey._pkey)
+ if rc != 1:
+ crypto._raise_current_error()
+ publickey = crypto._bio_to_string(bio)
+ except AttributeError:
+ # By doing this we prevent the code from raising an error
+ # yet we return no value in the fingerprint hash.
+ return None
+ elif backend == 'cryptography':
+ publickey = privatekey.public_key().public_bytes(
+ serialization.Encoding.DER,
+ serialization.PublicFormat.SubjectPublicKeyInfo
+ )
+
+ return get_fingerprint_of_bytes(publickey)
+
+
+def load_file_if_exists(path, module=None, ignore_errors=False):
+ try:
+ with open(path, 'rb') as f:
+ return f.read()
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ return None
+ if ignore_errors:
+ return None
+ if module is None:
+ raise
+ module.fail_json('Error while loading {0} - {1}'.format(path, str(exc)))
+ except Exception as exc:
+ if ignore_errors:
+ return None
+ if module is None:
+ raise
+ module.fail_json('Error while loading {0} - {1}'.format(path, str(exc)))
+
+
+def load_privatekey(path, passphrase=None, check_passphrase=True, content=None, backend='pyopenssl'):
+ """Load the specified OpenSSL private key.
+
+ The content can also be specified via content; in that case,
+ this function will not load the key from disk.
+ """
+
+ try:
+ if content is None:
+ with open(path, 'rb') as b_priv_key_fh:
+ priv_key_detail = b_priv_key_fh.read()
+ else:
+ priv_key_detail = content
+
+ if backend == 'pyopenssl':
+
+            # First try: load with the given passphrase (or the empty string
+            # if none was provided). This works if the passphrase is correct
+            # or the key is not password-protected.
+ try:
+ result = crypto.load_privatekey(crypto.FILETYPE_PEM,
+ priv_key_detail,
+ to_bytes(passphrase or ''))
+ except crypto.Error as e:
+ if len(e.args) > 0 and len(e.args[0]) > 0:
+ if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
+ # This happens in case we have the wrong passphrase.
+ if passphrase is not None:
+ raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key!')
+ else:
+ raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
+ raise OpenSSLObjectError('Error while deserializing key: {0}'.format(e))
+ if check_passphrase:
+                # Second try: load with a deliberately wrong passphrase to
+                # verify that the key really is passphrase-protected (and,
+                # if we tried the empty string above, that it is not
+                # protected by the empty string).
+ try:
+ crypto.load_privatekey(crypto.FILETYPE_PEM,
+ priv_key_detail,
+ to_bytes('y' if passphrase == 'x' else 'x'))
+ if passphrase is not None:
+ # Since we can load the key without an exception, the
+ # key isn't password-protected
+ raise OpenSSLBadPassphraseError('Passphrase provided, but private key is not password-protected!')
+ except crypto.Error as e:
+ if passphrase is None and len(e.args) > 0 and len(e.args[0]) > 0:
+ if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
+ # The key is obviously protected by the empty string.
+ # Don't do this at home (if it's possible at all)...
+ raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
+ elif backend == 'cryptography':
+ try:
+ result = load_pem_private_key(priv_key_detail,
+ None if passphrase is None else to_bytes(passphrase),
+ cryptography_backend())
+ except TypeError as dummy:
+ raise OpenSSLBadPassphraseError('Wrong or empty passphrase provided for private key')
+ except ValueError as dummy:
+ raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key')
+
+ return result
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
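+
+# Example (illustrative): load_privatekey('/tmp/example.pem', passphrase='secret',
+# backend='cryptography') returns a cryptography private key object; the default
+# pyopenssl backend returns an OpenSSL.crypto.PKey instance.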
+
+
+def load_certificate(path, content=None, backend='pyopenssl'):
+ """Load the specified certificate."""
+
+ try:
+ if content is None:
+ with open(path, 'rb') as cert_fh:
+ cert_content = cert_fh.read()
+ else:
+ cert_content = content
+ if backend == 'pyopenssl':
+ return crypto.load_certificate(crypto.FILETYPE_PEM, cert_content)
+ elif backend == 'cryptography':
+ return x509.load_pem_x509_certificate(cert_content, cryptography_backend())
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+
+
+def load_certificate_request(path, content=None, backend='pyopenssl'):
+ """Load the specified certificate signing request."""
+ try:
+ if content is None:
+ with open(path, 'rb') as csr_fh:
+ csr_content = csr_fh.read()
+ else:
+ csr_content = content
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+ if backend == 'pyopenssl':
+ return crypto.load_certificate_request(crypto.FILETYPE_PEM, csr_content)
+ elif backend == 'cryptography':
+ return x509.load_pem_x509_csr(csr_content, cryptography_backend())
+
+
+def parse_name_field(input_dict):
+ """Take a dict with key: value or key: list_of_values mappings and return a list of tuples"""
+
+ result = []
+ for key in input_dict:
+ if isinstance(input_dict[key], list):
+ for entry in input_dict[key]:
+ result.append((key, entry))
+ else:
+ result.append((key, input_dict[key]))
+ return result
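+
+# Example (illustrative): parse_name_field({'O': 'Acme', 'OU': ['a', 'b']})
+# returns [('O', 'Acme'), ('OU', 'a'), ('OU', 'b')] (up to dict ordering).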
+
+
+def convert_relative_to_datetime(relative_time_string):
+ """Get a datetime.datetime or None from a string in the time format described in sshd_config(5)"""
+
+ parsed_result = re.match(
+ r"^(?P<prefix>[+-])((?P<weeks>\d+)[wW])?((?P<days>\d+)[dD])?((?P<hours>\d+)[hH])?((?P<minutes>\d+)[mM])?((?P<seconds>\d+)[sS]?)?$",
+ relative_time_string)
+
+ if parsed_result is None or len(relative_time_string) == 1:
+ # not matched or only a single "+" or "-"
+ return None
+
+ offset = datetime.timedelta(0)
+ if parsed_result.group("weeks") is not None:
+ offset += datetime.timedelta(weeks=int(parsed_result.group("weeks")))
+ if parsed_result.group("days") is not None:
+ offset += datetime.timedelta(days=int(parsed_result.group("days")))
+ if parsed_result.group("hours") is not None:
+ offset += datetime.timedelta(hours=int(parsed_result.group("hours")))
+ if parsed_result.group("minutes") is not None:
+ offset += datetime.timedelta(
+ minutes=int(parsed_result.group("minutes")))
+ if parsed_result.group("seconds") is not None:
+ offset += datetime.timedelta(
+ seconds=int(parsed_result.group("seconds")))
+
+ if parsed_result.group("prefix") == "+":
+ return datetime.datetime.utcnow() + offset
+ else:
+ return datetime.datetime.utcnow() - offset
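+
+# Example (illustrative): convert_relative_to_datetime('+1w3d') returns the
+# current UTC time plus 10 days; convert_relative_to_datetime('-2h30m')
+# subtracts two and a half hours; malformed input returns None.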
+
+
+def get_relative_time_option(input_string, input_name, backend='cryptography'):
+ """Return an absolute timespec if a relative timespec or an ASN1 formatted
+ string is provided.
+
+ The return value will be a datetime object for the cryptography backend,
+ and a ASN1 formatted string for the pyopenssl backend."""
+ result = to_native(input_string)
+ if result is None:
+        raise OpenSSLObjectError(
+            'The timespec "%s" for %s is not valid' %
+            (input_string, input_name))
+ # Relative time
+    if result.startswith("+") or result.startswith("-"):
+        result_datetime = convert_relative_to_datetime(result)
+        if result_datetime is None:
+            raise OpenSSLObjectError(
+                'The timespec "%s" for %s is not valid' %
+                (input_string, input_name))
+        if backend == 'pyopenssl':
+            return result_datetime.strftime("%Y%m%d%H%M%SZ")
+        elif backend == 'cryptography':
+            return result_datetime
+ # Absolute time
+ if backend == 'pyopenssl':
+ return input_string
+ elif backend == 'cryptography':
+ for date_fmt in ['%Y%m%d%H%M%SZ', '%Y%m%d%H%MZ', '%Y%m%d%H%M%S%z', '%Y%m%d%H%M%z']:
+ try:
+ return datetime.datetime.strptime(result, date_fmt)
+ except ValueError:
+ pass
+
+ raise OpenSSLObjectError(
+        'The timespec "%s" for %s is invalid' %
+ (input_string, input_name)
+ )
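+
+# Example (illustrative): get_relative_time_option('+32w1d', 'valid_to')
+# returns a datetime 225 days in the future for the cryptography backend;
+# an absolute timespec such as '20301231235959Z' is parsed as ASN.1 time.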
+
+
+def select_message_digest(digest_string):
+ digest = None
+ if digest_string == 'sha256':
+ digest = hashes.SHA256()
+ elif digest_string == 'sha384':
+ digest = hashes.SHA384()
+ elif digest_string == 'sha512':
+ digest = hashes.SHA512()
+ elif digest_string == 'sha1':
+ digest = hashes.SHA1()
+ elif digest_string == 'md5':
+ digest = hashes.MD5()
+ return digest
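+
+# Example (illustrative): select_message_digest('sha256') returns a
+# hashes.SHA256() instance; an unknown digest name yields None.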
+
+
+def write_file(module, content, default_mode=None, path=None):
+    """
+    Write content into the destination file as securely as possible,
+    using the file arguments (path, mode, owner, ...) from the module.
+    """
+ # Find out parameters for file
+ file_args = module.load_file_common_arguments(module.params, path=path)
+ if file_args['mode'] is None:
+ file_args['mode'] = default_mode
+ # Create tempfile name
+ tmp_fd, tmp_name = tempfile.mkstemp(prefix=b'.ansible_tmp')
+ try:
+ os.close(tmp_fd)
+ except Exception as dummy:
+ pass
+ module.add_cleanup_file(tmp_name) # if we fail, let Ansible try to remove the file
+ try:
+ try:
+            # Open the temporary file created above and write the content
+            fd = os.open(tmp_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+            os.write(fd, content)
+            os.close(fd)
+ except Exception as e:
+ try:
+ os.remove(tmp_name)
+ except Exception as dummy:
+ pass
+ module.fail_json(msg='Error while writing result into temporary file: {0}'.format(e))
+ # Update destination to wanted permissions
+ if os.path.exists(file_args['path']):
+ module.set_fs_attributes_if_different(file_args, False)
+ # Move tempfile to final destination
+ module.atomic_move(tmp_name, file_args['path'])
+ # Try to update permissions again
+ module.set_fs_attributes_if_different(file_args, False)
+ except Exception as e:
+ try:
+ os.remove(tmp_name)
+ except Exception as dummy:
+ pass
+ module.fail_json(msg='Error while writing result: {0}'.format(e))
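+
+# Example (illustrative): within a module, write_file(module, to_bytes(pem_data),
+# default_mode=0o600) writes the content atomically and then applies the file
+# attributes (owner, mode, ...) from the module's parameters.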
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpenSSLObject(object):
+
+ def __init__(self, path, state, force, check_mode):
+ self.path = path
+ self.state = state
+ self.force = force
+ self.name = os.path.basename(path)
+ self.changed = False
+ self.check_mode = check_mode
+
+ def check(self, module, perms_required=True):
+ """Ensure the resource is in its desired state."""
+
+ def _check_state():
+ return os.path.exists(self.path)
+
+ def _check_perms(module):
+ file_args = module.load_file_common_arguments(module.params)
+ return not module.set_fs_attributes_if_different(file_args, False)
+
+ if not perms_required:
+ return _check_state()
+
+ return _check_state() and _check_perms(module)
+
+ @abc.abstractmethod
+ def dump(self):
+ """Serialize the object into a dictionary."""
+
+ pass
+
+ @abc.abstractmethod
+ def generate(self):
+ """Generate the resource."""
+
+ pass
+
+ def remove(self, module):
+ """Remove the resource from the filesystem."""
+
+ try:
+ os.remove(self.path)
+ self.changed = True
+        except OSError as exc:
+            if exc.errno != errno.ENOENT:
+                raise OpenSSLObjectError(exc)
+
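+# A minimal subclass sketch (hypothetical, for illustration only): concrete
+# resources implement generate() and dump() and inherit check() and remove().
+#
+#     class DummyObject(OpenSSLObject):
+#         def generate(self):
+#             if self.force or not os.path.exists(self.path):
+#                 self.changed = True
+#
+#         def dump(self):
+#             return {'path': self.path, 'changed': self.changed}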
+
+# #####################################################################################
+# #####################################################################################
+# This has been extracted from the OpenSSL project's objects.txt:
+# https://github.com/openssl/openssl/blob/9537fe5757bb07761fa275d779bbd40bcf5530e4/crypto/objects/objects.txt
+# Extracted with https://gist.github.com/felixfontein/376748017ad65ead093d56a45a5bf376
+#
+# In case the following data structure has any copyrightable content, note that it is licensed as follows:
+# Copyright (c) the OpenSSL contributors
+# Licensed under the Apache License 2.0
+# https://github.com/openssl/openssl/blob/master/LICENSE
+_OID_MAP = {
+ '0': ('itu-t', 'ITU-T', 'ccitt'),
+ '0.3.4401.5': ('ntt-ds', ),
+ '0.3.4401.5.3.1.9': ('camellia', ),
+ '0.3.4401.5.3.1.9.1': ('camellia-128-ecb', 'CAMELLIA-128-ECB'),
+ '0.3.4401.5.3.1.9.3': ('camellia-128-ofb', 'CAMELLIA-128-OFB'),
+ '0.3.4401.5.3.1.9.4': ('camellia-128-cfb', 'CAMELLIA-128-CFB'),
+ '0.3.4401.5.3.1.9.6': ('camellia-128-gcm', 'CAMELLIA-128-GCM'),
+ '0.3.4401.5.3.1.9.7': ('camellia-128-ccm', 'CAMELLIA-128-CCM'),
+ '0.3.4401.5.3.1.9.9': ('camellia-128-ctr', 'CAMELLIA-128-CTR'),
+ '0.3.4401.5.3.1.9.10': ('camellia-128-cmac', 'CAMELLIA-128-CMAC'),
+ '0.3.4401.5.3.1.9.21': ('camellia-192-ecb', 'CAMELLIA-192-ECB'),
+ '0.3.4401.5.3.1.9.23': ('camellia-192-ofb', 'CAMELLIA-192-OFB'),
+ '0.3.4401.5.3.1.9.24': ('camellia-192-cfb', 'CAMELLIA-192-CFB'),
+ '0.3.4401.5.3.1.9.26': ('camellia-192-gcm', 'CAMELLIA-192-GCM'),
+ '0.3.4401.5.3.1.9.27': ('camellia-192-ccm', 'CAMELLIA-192-CCM'),
+ '0.3.4401.5.3.1.9.29': ('camellia-192-ctr', 'CAMELLIA-192-CTR'),
+ '0.3.4401.5.3.1.9.30': ('camellia-192-cmac', 'CAMELLIA-192-CMAC'),
+ '0.3.4401.5.3.1.9.41': ('camellia-256-ecb', 'CAMELLIA-256-ECB'),
+ '0.3.4401.5.3.1.9.43': ('camellia-256-ofb', 'CAMELLIA-256-OFB'),
+ '0.3.4401.5.3.1.9.44': ('camellia-256-cfb', 'CAMELLIA-256-CFB'),
+ '0.3.4401.5.3.1.9.46': ('camellia-256-gcm', 'CAMELLIA-256-GCM'),
+ '0.3.4401.5.3.1.9.47': ('camellia-256-ccm', 'CAMELLIA-256-CCM'),
+ '0.3.4401.5.3.1.9.49': ('camellia-256-ctr', 'CAMELLIA-256-CTR'),
+ '0.3.4401.5.3.1.9.50': ('camellia-256-cmac', 'CAMELLIA-256-CMAC'),
+ '0.9': ('data', ),
+ '0.9.2342': ('pss', ),
+ '0.9.2342.19200300': ('ucl', ),
+ '0.9.2342.19200300.100': ('pilot', ),
+ '0.9.2342.19200300.100.1': ('pilotAttributeType', ),
+ '0.9.2342.19200300.100.1.1': ('userId', 'UID'),
+ '0.9.2342.19200300.100.1.2': ('textEncodedORAddress', ),
+ '0.9.2342.19200300.100.1.3': ('rfc822Mailbox', 'mail'),
+ '0.9.2342.19200300.100.1.4': ('info', ),
+ '0.9.2342.19200300.100.1.5': ('favouriteDrink', ),
+ '0.9.2342.19200300.100.1.6': ('roomNumber', ),
+ '0.9.2342.19200300.100.1.7': ('photo', ),
+ '0.9.2342.19200300.100.1.8': ('userClass', ),
+ '0.9.2342.19200300.100.1.9': ('host', ),
+ '0.9.2342.19200300.100.1.10': ('manager', ),
+ '0.9.2342.19200300.100.1.11': ('documentIdentifier', ),
+ '0.9.2342.19200300.100.1.12': ('documentTitle', ),
+ '0.9.2342.19200300.100.1.13': ('documentVersion', ),
+ '0.9.2342.19200300.100.1.14': ('documentAuthor', ),
+ '0.9.2342.19200300.100.1.15': ('documentLocation', ),
+ '0.9.2342.19200300.100.1.20': ('homeTelephoneNumber', ),
+ '0.9.2342.19200300.100.1.21': ('secretary', ),
+ '0.9.2342.19200300.100.1.22': ('otherMailbox', ),
+ '0.9.2342.19200300.100.1.23': ('lastModifiedTime', ),
+ '0.9.2342.19200300.100.1.24': ('lastModifiedBy', ),
+ '0.9.2342.19200300.100.1.25': ('domainComponent', 'DC'),
+ '0.9.2342.19200300.100.1.26': ('aRecord', ),
+ '0.9.2342.19200300.100.1.27': ('pilotAttributeType27', ),
+ '0.9.2342.19200300.100.1.28': ('mXRecord', ),
+ '0.9.2342.19200300.100.1.29': ('nSRecord', ),
+ '0.9.2342.19200300.100.1.30': ('sOARecord', ),
+ '0.9.2342.19200300.100.1.31': ('cNAMERecord', ),
+ '0.9.2342.19200300.100.1.37': ('associatedDomain', ),
+ '0.9.2342.19200300.100.1.38': ('associatedName', ),
+ '0.9.2342.19200300.100.1.39': ('homePostalAddress', ),
+ '0.9.2342.19200300.100.1.40': ('personalTitle', ),
+ '0.9.2342.19200300.100.1.41': ('mobileTelephoneNumber', ),
+ '0.9.2342.19200300.100.1.42': ('pagerTelephoneNumber', ),
+ '0.9.2342.19200300.100.1.43': ('friendlyCountryName', ),
+ '0.9.2342.19200300.100.1.44': ('uniqueIdentifier', 'uid'),
+ '0.9.2342.19200300.100.1.45': ('organizationalStatus', ),
+ '0.9.2342.19200300.100.1.46': ('janetMailbox', ),
+ '0.9.2342.19200300.100.1.47': ('mailPreferenceOption', ),
+ '0.9.2342.19200300.100.1.48': ('buildingName', ),
+ '0.9.2342.19200300.100.1.49': ('dSAQuality', ),
+ '0.9.2342.19200300.100.1.50': ('singleLevelQuality', ),
+ '0.9.2342.19200300.100.1.51': ('subtreeMinimumQuality', ),
+ '0.9.2342.19200300.100.1.52': ('subtreeMaximumQuality', ),
+ '0.9.2342.19200300.100.1.53': ('personalSignature', ),
+ '0.9.2342.19200300.100.1.54': ('dITRedirect', ),
+ '0.9.2342.19200300.100.1.55': ('audio', ),
+ '0.9.2342.19200300.100.1.56': ('documentPublisher', ),
+ '0.9.2342.19200300.100.3': ('pilotAttributeSyntax', ),
+ '0.9.2342.19200300.100.3.4': ('iA5StringSyntax', ),
+ '0.9.2342.19200300.100.3.5': ('caseIgnoreIA5StringSyntax', ),
+ '0.9.2342.19200300.100.4': ('pilotObjectClass', ),
+ '0.9.2342.19200300.100.4.3': ('pilotObject', ),
+ '0.9.2342.19200300.100.4.4': ('pilotPerson', ),
+ '0.9.2342.19200300.100.4.5': ('account', ),
+ '0.9.2342.19200300.100.4.6': ('document', ),
+ '0.9.2342.19200300.100.4.7': ('room', ),
+ '0.9.2342.19200300.100.4.9': ('documentSeries', ),
+ '0.9.2342.19200300.100.4.13': ('Domain', 'domain'),
+ '0.9.2342.19200300.100.4.14': ('rFC822localPart', ),
+ '0.9.2342.19200300.100.4.15': ('dNSDomain', ),
+ '0.9.2342.19200300.100.4.17': ('domainRelatedObject', ),
+ '0.9.2342.19200300.100.4.18': ('friendlyCountry', ),
+ '0.9.2342.19200300.100.4.19': ('simpleSecurityObject', ),
+ '0.9.2342.19200300.100.4.20': ('pilotOrganization', ),
+ '0.9.2342.19200300.100.4.21': ('pilotDSA', ),
+ '0.9.2342.19200300.100.4.22': ('qualityLabelledData', ),
+ '0.9.2342.19200300.100.10': ('pilotGroups', ),
+ '1': ('iso', 'ISO'),
+ '1.0.9797.3.4': ('gmac', 'GMAC'),
+ '1.0.10118.3.0.55': ('whirlpool', ),
+ '1.2': ('ISO Member Body', 'member-body'),
+ '1.2.156': ('ISO CN Member Body', 'ISO-CN'),
+ '1.2.156.10197': ('oscca', ),
+ '1.2.156.10197.1': ('sm-scheme', ),
+ '1.2.156.10197.1.104.1': ('sm4-ecb', 'SM4-ECB'),
+ '1.2.156.10197.1.104.2': ('sm4-cbc', 'SM4-CBC'),
+ '1.2.156.10197.1.104.3': ('sm4-ofb', 'SM4-OFB'),
+ '1.2.156.10197.1.104.4': ('sm4-cfb', 'SM4-CFB'),
+ '1.2.156.10197.1.104.5': ('sm4-cfb1', 'SM4-CFB1'),
+ '1.2.156.10197.1.104.6': ('sm4-cfb8', 'SM4-CFB8'),
+ '1.2.156.10197.1.104.7': ('sm4-ctr', 'SM4-CTR'),
+ '1.2.156.10197.1.301': ('sm2', 'SM2'),
+ '1.2.156.10197.1.401': ('sm3', 'SM3'),
+ '1.2.156.10197.1.501': ('SM2-with-SM3', 'SM2-SM3'),
+ '1.2.156.10197.1.504': ('sm3WithRSAEncryption', 'RSA-SM3'),
+ '1.2.392.200011.61.1.1.1.2': ('camellia-128-cbc', 'CAMELLIA-128-CBC'),
+ '1.2.392.200011.61.1.1.1.3': ('camellia-192-cbc', 'CAMELLIA-192-CBC'),
+ '1.2.392.200011.61.1.1.1.4': ('camellia-256-cbc', 'CAMELLIA-256-CBC'),
+ '1.2.392.200011.61.1.1.3.2': ('id-camellia128-wrap', ),
+ '1.2.392.200011.61.1.1.3.3': ('id-camellia192-wrap', ),
+ '1.2.392.200011.61.1.1.3.4': ('id-camellia256-wrap', ),
+ '1.2.410.200004': ('kisa', 'KISA'),
+ '1.2.410.200004.1.3': ('seed-ecb', 'SEED-ECB'),
+ '1.2.410.200004.1.4': ('seed-cbc', 'SEED-CBC'),
+ '1.2.410.200004.1.5': ('seed-cfb', 'SEED-CFB'),
+ '1.2.410.200004.1.6': ('seed-ofb', 'SEED-OFB'),
+ '1.2.410.200046.1.1': ('aria', ),
+ '1.2.410.200046.1.1.1': ('aria-128-ecb', 'ARIA-128-ECB'),
+ '1.2.410.200046.1.1.2': ('aria-128-cbc', 'ARIA-128-CBC'),
+ '1.2.410.200046.1.1.3': ('aria-128-cfb', 'ARIA-128-CFB'),
+ '1.2.410.200046.1.1.4': ('aria-128-ofb', 'ARIA-128-OFB'),
+ '1.2.410.200046.1.1.5': ('aria-128-ctr', 'ARIA-128-CTR'),
+ '1.2.410.200046.1.1.6': ('aria-192-ecb', 'ARIA-192-ECB'),
+ '1.2.410.200046.1.1.7': ('aria-192-cbc', 'ARIA-192-CBC'),
+ '1.2.410.200046.1.1.8': ('aria-192-cfb', 'ARIA-192-CFB'),
+ '1.2.410.200046.1.1.9': ('aria-192-ofb', 'ARIA-192-OFB'),
+ '1.2.410.200046.1.1.10': ('aria-192-ctr', 'ARIA-192-CTR'),
+ '1.2.410.200046.1.1.11': ('aria-256-ecb', 'ARIA-256-ECB'),
+ '1.2.410.200046.1.1.12': ('aria-256-cbc', 'ARIA-256-CBC'),
+ '1.2.410.200046.1.1.13': ('aria-256-cfb', 'ARIA-256-CFB'),
+ '1.2.410.200046.1.1.14': ('aria-256-ofb', 'ARIA-256-OFB'),
+ '1.2.410.200046.1.1.15': ('aria-256-ctr', 'ARIA-256-CTR'),
+ '1.2.410.200046.1.1.34': ('aria-128-gcm', 'ARIA-128-GCM'),
+ '1.2.410.200046.1.1.35': ('aria-192-gcm', 'ARIA-192-GCM'),
+ '1.2.410.200046.1.1.36': ('aria-256-gcm', 'ARIA-256-GCM'),
+ '1.2.410.200046.1.1.37': ('aria-128-ccm', 'ARIA-128-CCM'),
+ '1.2.410.200046.1.1.38': ('aria-192-ccm', 'ARIA-192-CCM'),
+ '1.2.410.200046.1.1.39': ('aria-256-ccm', 'ARIA-256-CCM'),
+ '1.2.643.2.2': ('cryptopro', ),
+ '1.2.643.2.2.3': ('GOST R 34.11-94 with GOST R 34.10-2001', 'id-GostR3411-94-with-GostR3410-2001'),
+ '1.2.643.2.2.4': ('GOST R 34.11-94 with GOST R 34.10-94', 'id-GostR3411-94-with-GostR3410-94'),
+ '1.2.643.2.2.9': ('GOST R 34.11-94', 'md_gost94'),
+ '1.2.643.2.2.10': ('HMAC GOST 34.11-94', 'id-HMACGostR3411-94'),
+ '1.2.643.2.2.14.0': ('id-Gost28147-89-None-KeyMeshing', ),
+ '1.2.643.2.2.14.1': ('id-Gost28147-89-CryptoPro-KeyMeshing', ),
+ '1.2.643.2.2.19': ('GOST R 34.10-2001', 'gost2001'),
+ '1.2.643.2.2.20': ('GOST R 34.10-94', 'gost94'),
+ '1.2.643.2.2.20.1': ('id-GostR3410-94-a', ),
+ '1.2.643.2.2.20.2': ('id-GostR3410-94-aBis', ),
+ '1.2.643.2.2.20.3': ('id-GostR3410-94-b', ),
+ '1.2.643.2.2.20.4': ('id-GostR3410-94-bBis', ),
+ '1.2.643.2.2.21': ('GOST 28147-89', 'gost89'),
+ '1.2.643.2.2.22': ('GOST 28147-89 MAC', 'gost-mac'),
+ '1.2.643.2.2.23': ('GOST R 34.11-94 PRF', 'prf-gostr3411-94'),
+ '1.2.643.2.2.30.0': ('id-GostR3411-94-TestParamSet', ),
+ '1.2.643.2.2.30.1': ('id-GostR3411-94-CryptoProParamSet', ),
+ '1.2.643.2.2.31.0': ('id-Gost28147-89-TestParamSet', ),
+ '1.2.643.2.2.31.1': ('id-Gost28147-89-CryptoPro-A-ParamSet', ),
+ '1.2.643.2.2.31.2': ('id-Gost28147-89-CryptoPro-B-ParamSet', ),
+ '1.2.643.2.2.31.3': ('id-Gost28147-89-CryptoPro-C-ParamSet', ),
+ '1.2.643.2.2.31.4': ('id-Gost28147-89-CryptoPro-D-ParamSet', ),
+ '1.2.643.2.2.31.5': ('id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet', ),
+ '1.2.643.2.2.31.6': ('id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet', ),
+ '1.2.643.2.2.31.7': ('id-Gost28147-89-CryptoPro-RIC-1-ParamSet', ),
+ '1.2.643.2.2.32.0': ('id-GostR3410-94-TestParamSet', ),
+ '1.2.643.2.2.32.2': ('id-GostR3410-94-CryptoPro-A-ParamSet', ),
+ '1.2.643.2.2.32.3': ('id-GostR3410-94-CryptoPro-B-ParamSet', ),
+ '1.2.643.2.2.32.4': ('id-GostR3410-94-CryptoPro-C-ParamSet', ),
+ '1.2.643.2.2.32.5': ('id-GostR3410-94-CryptoPro-D-ParamSet', ),
+ '1.2.643.2.2.33.1': ('id-GostR3410-94-CryptoPro-XchA-ParamSet', ),
+ '1.2.643.2.2.33.2': ('id-GostR3410-94-CryptoPro-XchB-ParamSet', ),
+ '1.2.643.2.2.33.3': ('id-GostR3410-94-CryptoPro-XchC-ParamSet', ),
+ '1.2.643.2.2.35.0': ('id-GostR3410-2001-TestParamSet', ),
+ '1.2.643.2.2.35.1': ('id-GostR3410-2001-CryptoPro-A-ParamSet', ),
+ '1.2.643.2.2.35.2': ('id-GostR3410-2001-CryptoPro-B-ParamSet', ),
+ '1.2.643.2.2.35.3': ('id-GostR3410-2001-CryptoPro-C-ParamSet', ),
+ '1.2.643.2.2.36.0': ('id-GostR3410-2001-CryptoPro-XchA-ParamSet', ),
+ '1.2.643.2.2.36.1': ('id-GostR3410-2001-CryptoPro-XchB-ParamSet', ),
+ '1.2.643.2.2.98': ('GOST R 34.10-2001 DH', 'id-GostR3410-2001DH'),
+ '1.2.643.2.2.99': ('GOST R 34.10-94 DH', 'id-GostR3410-94DH'),
+ '1.2.643.2.9': ('cryptocom', ),
+ '1.2.643.2.9.1.3.3': ('GOST R 34.11-94 with GOST R 34.10-94 Cryptocom', 'id-GostR3411-94-with-GostR3410-94-cc'),
+ '1.2.643.2.9.1.3.4': ('GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom', 'id-GostR3411-94-with-GostR3410-2001-cc'),
+ '1.2.643.2.9.1.5.3': ('GOST 34.10-94 Cryptocom', 'gost94cc'),
+ '1.2.643.2.9.1.5.4': ('GOST 34.10-2001 Cryptocom', 'gost2001cc'),
+ '1.2.643.2.9.1.6.1': ('GOST 28147-89 Cryptocom ParamSet', 'id-Gost28147-89-cc'),
+ '1.2.643.2.9.1.8.1': ('GOST R 3410-2001 Parameter Set Cryptocom', 'id-GostR3410-2001-ParamSet-cc'),
+ '1.2.643.3.131.1.1': ('INN', 'INN'),
+ '1.2.643.7.1': ('id-tc26', ),
+ '1.2.643.7.1.1': ('id-tc26-algorithms', ),
+ '1.2.643.7.1.1.1': ('id-tc26-sign', ),
+ '1.2.643.7.1.1.1.1': ('GOST R 34.10-2012 with 256 bit modulus', 'gost2012_256'),
+ '1.2.643.7.1.1.1.2': ('GOST R 34.10-2012 with 512 bit modulus', 'gost2012_512'),
+ '1.2.643.7.1.1.2': ('id-tc26-digest', ),
+ '1.2.643.7.1.1.2.2': ('GOST R 34.11-2012 with 256 bit hash', 'md_gost12_256'),
+ '1.2.643.7.1.1.2.3': ('GOST R 34.11-2012 with 512 bit hash', 'md_gost12_512'),
+ '1.2.643.7.1.1.3': ('id-tc26-signwithdigest', ),
+ '1.2.643.7.1.1.3.2': ('GOST R 34.10-2012 with GOST R 34.11-2012 (256 bit)', 'id-tc26-signwithdigest-gost3410-2012-256'),
+ '1.2.643.7.1.1.3.3': ('GOST R 34.10-2012 with GOST R 34.11-2012 (512 bit)', 'id-tc26-signwithdigest-gost3410-2012-512'),
+ '1.2.643.7.1.1.4': ('id-tc26-mac', ),
+ '1.2.643.7.1.1.4.1': ('HMAC GOST 34.11-2012 256 bit', 'id-tc26-hmac-gost-3411-2012-256'),
+ '1.2.643.7.1.1.4.2': ('HMAC GOST 34.11-2012 512 bit', 'id-tc26-hmac-gost-3411-2012-512'),
+ '1.2.643.7.1.1.5': ('id-tc26-cipher', ),
+ '1.2.643.7.1.1.5.1': ('id-tc26-cipher-gostr3412-2015-magma', ),
+ '1.2.643.7.1.1.5.1.1': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm', ),
+ '1.2.643.7.1.1.5.1.2': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm-omac', ),
+ '1.2.643.7.1.1.5.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik', ),
+ '1.2.643.7.1.1.5.2.1': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm', ),
+ '1.2.643.7.1.1.5.2.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm-omac', ),
+ '1.2.643.7.1.1.6': ('id-tc26-agreement', ),
+ '1.2.643.7.1.1.6.1': ('id-tc26-agreement-gost-3410-2012-256', ),
+ '1.2.643.7.1.1.6.2': ('id-tc26-agreement-gost-3410-2012-512', ),
+ '1.2.643.7.1.1.7': ('id-tc26-wrap', ),
+ '1.2.643.7.1.1.7.1': ('id-tc26-wrap-gostr3412-2015-magma', ),
+ '1.2.643.7.1.1.7.1.1': ('id-tc26-wrap-gostr3412-2015-magma-kexp15', 'id-tc26-wrap-gostr3412-2015-kuznyechik-kexp15'),
+ '1.2.643.7.1.1.7.2': ('id-tc26-wrap-gostr3412-2015-kuznyechik', ),
+ '1.2.643.7.1.2': ('id-tc26-constants', ),
+ '1.2.643.7.1.2.1': ('id-tc26-sign-constants', ),
+ '1.2.643.7.1.2.1.1': ('id-tc26-gost-3410-2012-256-constants', ),
+ '1.2.643.7.1.2.1.1.1': ('GOST R 34.10-2012 (256 bit) ParamSet A', 'id-tc26-gost-3410-2012-256-paramSetA'),
+ '1.2.643.7.1.2.1.1.2': ('GOST R 34.10-2012 (256 bit) ParamSet B', 'id-tc26-gost-3410-2012-256-paramSetB'),
+ '1.2.643.7.1.2.1.1.3': ('GOST R 34.10-2012 (256 bit) ParamSet C', 'id-tc26-gost-3410-2012-256-paramSetC'),
+ '1.2.643.7.1.2.1.1.4': ('GOST R 34.10-2012 (256 bit) ParamSet D', 'id-tc26-gost-3410-2012-256-paramSetD'),
+ '1.2.643.7.1.2.1.2': ('id-tc26-gost-3410-2012-512-constants', ),
+ '1.2.643.7.1.2.1.2.0': ('GOST R 34.10-2012 (512 bit) testing parameter set', 'id-tc26-gost-3410-2012-512-paramSetTest'),
+ '1.2.643.7.1.2.1.2.1': ('GOST R 34.10-2012 (512 bit) ParamSet A', 'id-tc26-gost-3410-2012-512-paramSetA'),
+ '1.2.643.7.1.2.1.2.2': ('GOST R 34.10-2012 (512 bit) ParamSet B', 'id-tc26-gost-3410-2012-512-paramSetB'),
+ '1.2.643.7.1.2.1.2.3': ('GOST R 34.10-2012 (512 bit) ParamSet C', 'id-tc26-gost-3410-2012-512-paramSetC'),
+ '1.2.643.7.1.2.2': ('id-tc26-digest-constants', ),
+ '1.2.643.7.1.2.5': ('id-tc26-cipher-constants', ),
+ '1.2.643.7.1.2.5.1': ('id-tc26-gost-28147-constants', ),
+ '1.2.643.7.1.2.5.1.1': ('GOST 28147-89 TC26 parameter set', 'id-tc26-gost-28147-param-Z'),
+ '1.2.643.100.1': ('OGRN', 'OGRN'),
+ '1.2.643.100.3': ('SNILS', 'SNILS'),
+ '1.2.643.100.111': ('Signing Tool of Subject', 'subjectSignTool'),
+ '1.2.643.100.112': ('Signing Tool of Issuer', 'issuerSignTool'),
+ '1.2.804': ('ISO-UA', ),
+ '1.2.804.2.1.1.1': ('ua-pki', ),
+ '1.2.804.2.1.1.1.1.1.1': ('DSTU Gost 28147-2009', 'dstu28147'),
+ '1.2.804.2.1.1.1.1.1.1.2': ('DSTU Gost 28147-2009 OFB mode', 'dstu28147-ofb'),
+ '1.2.804.2.1.1.1.1.1.1.3': ('DSTU Gost 28147-2009 CFB mode', 'dstu28147-cfb'),
+ '1.2.804.2.1.1.1.1.1.1.5': ('DSTU Gost 28147-2009 key wrap', 'dstu28147-wrap'),
+ '1.2.804.2.1.1.1.1.1.2': ('HMAC DSTU Gost 34311-95', 'hmacWithDstu34311'),
+ '1.2.804.2.1.1.1.1.2.1': ('DSTU Gost 34311-95', 'dstu34311'),
+ '1.2.804.2.1.1.1.1.3.1.1': ('DSTU 4145-2002 little endian', 'dstu4145le'),
+ '1.2.804.2.1.1.1.1.3.1.1.1.1': ('DSTU 4145-2002 big endian', 'dstu4145be'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.0': ('DSTU curve 0', 'uacurve0'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.1': ('DSTU curve 1', 'uacurve1'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.2': ('DSTU curve 2', 'uacurve2'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.3': ('DSTU curve 3', 'uacurve3'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.4': ('DSTU curve 4', 'uacurve4'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.5': ('DSTU curve 5', 'uacurve5'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.6': ('DSTU curve 6', 'uacurve6'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.7': ('DSTU curve 7', 'uacurve7'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.8': ('DSTU curve 8', 'uacurve8'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.9': ('DSTU curve 9', 'uacurve9'),
+ '1.2.840': ('ISO US Member Body', 'ISO-US'),
+ '1.2.840.10040': ('X9.57', 'X9-57'),
+ '1.2.840.10040.2': ('holdInstruction', ),
+ '1.2.840.10040.2.1': ('Hold Instruction None', 'holdInstructionNone'),
+ '1.2.840.10040.2.2': ('Hold Instruction Call Issuer', 'holdInstructionCallIssuer'),
+ '1.2.840.10040.2.3': ('Hold Instruction Reject', 'holdInstructionReject'),
+ '1.2.840.10040.4': ('X9.57 CM ?', 'X9cm'),
+ '1.2.840.10040.4.1': ('dsaEncryption', 'DSA'),
+ '1.2.840.10040.4.3': ('dsaWithSHA1', 'DSA-SHA1'),
+ '1.2.840.10045': ('ANSI X9.62', 'ansi-X9-62'),
+ '1.2.840.10045.1': ('id-fieldType', ),
+ '1.2.840.10045.1.1': ('prime-field', ),
+ '1.2.840.10045.1.2': ('characteristic-two-field', ),
+ '1.2.840.10045.1.2.3': ('id-characteristic-two-basis', ),
+ '1.2.840.10045.1.2.3.1': ('onBasis', ),
+ '1.2.840.10045.1.2.3.2': ('tpBasis', ),
+ '1.2.840.10045.1.2.3.3': ('ppBasis', ),
+ '1.2.840.10045.2': ('id-publicKeyType', ),
+ '1.2.840.10045.2.1': ('id-ecPublicKey', ),
+ '1.2.840.10045.3': ('ellipticCurve', ),
+ '1.2.840.10045.3.0': ('c-TwoCurve', ),
+ '1.2.840.10045.3.0.1': ('c2pnb163v1', ),
+ '1.2.840.10045.3.0.2': ('c2pnb163v2', ),
+ '1.2.840.10045.3.0.3': ('c2pnb163v3', ),
+ '1.2.840.10045.3.0.4': ('c2pnb176v1', ),
+ '1.2.840.10045.3.0.5': ('c2tnb191v1', ),
+ '1.2.840.10045.3.0.6': ('c2tnb191v2', ),
+ '1.2.840.10045.3.0.7': ('c2tnb191v3', ),
+ '1.2.840.10045.3.0.8': ('c2onb191v4', ),
+ '1.2.840.10045.3.0.9': ('c2onb191v5', ),
+ '1.2.840.10045.3.0.10': ('c2pnb208w1', ),
+ '1.2.840.10045.3.0.11': ('c2tnb239v1', ),
+ '1.2.840.10045.3.0.12': ('c2tnb239v2', ),
+ '1.2.840.10045.3.0.13': ('c2tnb239v3', ),
+ '1.2.840.10045.3.0.14': ('c2onb239v4', ),
+ '1.2.840.10045.3.0.15': ('c2onb239v5', ),
+ '1.2.840.10045.3.0.16': ('c2pnb272w1', ),
+ '1.2.840.10045.3.0.17': ('c2pnb304w1', ),
+ '1.2.840.10045.3.0.18': ('c2tnb359v1', ),
+ '1.2.840.10045.3.0.19': ('c2pnb368w1', ),
+ '1.2.840.10045.3.0.20': ('c2tnb431r1', ),
+ '1.2.840.10045.3.1': ('primeCurve', ),
+ '1.2.840.10045.3.1.1': ('prime192v1', ),
+ '1.2.840.10045.3.1.2': ('prime192v2', ),
+ '1.2.840.10045.3.1.3': ('prime192v3', ),
+ '1.2.840.10045.3.1.4': ('prime239v1', ),
+ '1.2.840.10045.3.1.5': ('prime239v2', ),
+ '1.2.840.10045.3.1.6': ('prime239v3', ),
+ '1.2.840.10045.3.1.7': ('prime256v1', ),
+ '1.2.840.10045.4': ('id-ecSigType', ),
+ '1.2.840.10045.4.1': ('ecdsa-with-SHA1', ),
+ '1.2.840.10045.4.2': ('ecdsa-with-Recommended', ),
+ '1.2.840.10045.4.3': ('ecdsa-with-Specified', ),
+ '1.2.840.10045.4.3.1': ('ecdsa-with-SHA224', ),
+ '1.2.840.10045.4.3.2': ('ecdsa-with-SHA256', ),
+ '1.2.840.10045.4.3.3': ('ecdsa-with-SHA384', ),
+ '1.2.840.10045.4.3.4': ('ecdsa-with-SHA512', ),
+ '1.2.840.10046.2.1': ('X9.42 DH', 'dhpublicnumber'),
+ '1.2.840.113533.7.66.10': ('cast5-cbc', 'CAST5-CBC'),
+ '1.2.840.113533.7.66.12': ('pbeWithMD5AndCast5CBC', ),
+ '1.2.840.113533.7.66.13': ('password based MAC', 'id-PasswordBasedMAC'),
+ '1.2.840.113533.7.66.30': ('Diffie-Hellman based MAC', 'id-DHBasedMac'),
+ '1.2.840.113549': ('RSA Data Security, Inc.', 'rsadsi'),
+ '1.2.840.113549.1': ('RSA Data Security, Inc. PKCS', 'pkcs'),
+ '1.2.840.113549.1.1': ('pkcs1', ),
+ '1.2.840.113549.1.1.1': ('rsaEncryption', ),
+ '1.2.840.113549.1.1.2': ('md2WithRSAEncryption', 'RSA-MD2'),
+ '1.2.840.113549.1.1.3': ('md4WithRSAEncryption', 'RSA-MD4'),
+ '1.2.840.113549.1.1.4': ('md5WithRSAEncryption', 'RSA-MD5'),
+ '1.2.840.113549.1.1.5': ('sha1WithRSAEncryption', 'RSA-SHA1'),
+ '1.2.840.113549.1.1.6': ('rsaOAEPEncryptionSET', ),
+ '1.2.840.113549.1.1.7': ('rsaesOaep', 'RSAES-OAEP'),
+ '1.2.840.113549.1.1.8': ('mgf1', 'MGF1'),
+ '1.2.840.113549.1.1.9': ('pSpecified', 'PSPECIFIED'),
+ '1.2.840.113549.1.1.10': ('rsassaPss', 'RSASSA-PSS'),
+ '1.2.840.113549.1.1.11': ('sha256WithRSAEncryption', 'RSA-SHA256'),
+ '1.2.840.113549.1.1.12': ('sha384WithRSAEncryption', 'RSA-SHA384'),
+ '1.2.840.113549.1.1.13': ('sha512WithRSAEncryption', 'RSA-SHA512'),
+ '1.2.840.113549.1.1.14': ('sha224WithRSAEncryption', 'RSA-SHA224'),
+ '1.2.840.113549.1.1.15': ('sha512-224WithRSAEncryption', 'RSA-SHA512/224'),
+ '1.2.840.113549.1.1.16': ('sha512-256WithRSAEncryption', 'RSA-SHA512/256'),
+ '1.2.840.113549.1.3': ('pkcs3', ),
+ '1.2.840.113549.1.3.1': ('dhKeyAgreement', ),
+ '1.2.840.113549.1.5': ('pkcs5', ),
+ '1.2.840.113549.1.5.1': ('pbeWithMD2AndDES-CBC', 'PBE-MD2-DES'),
+ '1.2.840.113549.1.5.3': ('pbeWithMD5AndDES-CBC', 'PBE-MD5-DES'),
+ '1.2.840.113549.1.5.4': ('pbeWithMD2AndRC2-CBC', 'PBE-MD2-RC2-64'),
+ '1.2.840.113549.1.5.6': ('pbeWithMD5AndRC2-CBC', 'PBE-MD5-RC2-64'),
+ '1.2.840.113549.1.5.10': ('pbeWithSHA1AndDES-CBC', 'PBE-SHA1-DES'),
+ '1.2.840.113549.1.5.11': ('pbeWithSHA1AndRC2-CBC', 'PBE-SHA1-RC2-64'),
+ '1.2.840.113549.1.5.12': ('PBKDF2', ),
+ '1.2.840.113549.1.5.13': ('PBES2', ),
+ '1.2.840.113549.1.5.14': ('PBMAC1', ),
+ '1.2.840.113549.1.7': ('pkcs7', ),
+ '1.2.840.113549.1.7.1': ('pkcs7-data', ),
+ '1.2.840.113549.1.7.2': ('pkcs7-signedData', ),
+ '1.2.840.113549.1.7.3': ('pkcs7-envelopedData', ),
+ '1.2.840.113549.1.7.4': ('pkcs7-signedAndEnvelopedData', ),
+ '1.2.840.113549.1.7.5': ('pkcs7-digestData', ),
+ '1.2.840.113549.1.7.6': ('pkcs7-encryptedData', ),
+ '1.2.840.113549.1.9': ('pkcs9', ),
+ '1.2.840.113549.1.9.1': ('emailAddress', ),
+ '1.2.840.113549.1.9.2': ('unstructuredName', ),
+ '1.2.840.113549.1.9.3': ('contentType', ),
+ '1.2.840.113549.1.9.4': ('messageDigest', ),
+ '1.2.840.113549.1.9.5': ('signingTime', ),
+ '1.2.840.113549.1.9.6': ('countersignature', ),
+ '1.2.840.113549.1.9.7': ('challengePassword', ),
+ '1.2.840.113549.1.9.8': ('unstructuredAddress', ),
+ '1.2.840.113549.1.9.9': ('extendedCertificateAttributes', ),
+ '1.2.840.113549.1.9.14': ('Extension Request', 'extReq'),
+ '1.2.840.113549.1.9.15': ('S/MIME Capabilities', 'SMIME-CAPS'),
+ '1.2.840.113549.1.9.16': ('S/MIME', 'SMIME'),
+ '1.2.840.113549.1.9.16.0': ('id-smime-mod', ),
+ '1.2.840.113549.1.9.16.0.1': ('id-smime-mod-cms', ),
+ '1.2.840.113549.1.9.16.0.2': ('id-smime-mod-ess', ),
+ '1.2.840.113549.1.9.16.0.3': ('id-smime-mod-oid', ),
+ '1.2.840.113549.1.9.16.0.4': ('id-smime-mod-msg-v3', ),
+ '1.2.840.113549.1.9.16.0.5': ('id-smime-mod-ets-eSignature-88', ),
+ '1.2.840.113549.1.9.16.0.6': ('id-smime-mod-ets-eSignature-97', ),
+ '1.2.840.113549.1.9.16.0.7': ('id-smime-mod-ets-eSigPolicy-88', ),
+ '1.2.840.113549.1.9.16.0.8': ('id-smime-mod-ets-eSigPolicy-97', ),
+ '1.2.840.113549.1.9.16.1': ('id-smime-ct', ),
+ '1.2.840.113549.1.9.16.1.1': ('id-smime-ct-receipt', ),
+ '1.2.840.113549.1.9.16.1.2': ('id-smime-ct-authData', ),
+ '1.2.840.113549.1.9.16.1.3': ('id-smime-ct-publishCert', ),
+ '1.2.840.113549.1.9.16.1.4': ('id-smime-ct-TSTInfo', ),
+ '1.2.840.113549.1.9.16.1.5': ('id-smime-ct-TDTInfo', ),
+ '1.2.840.113549.1.9.16.1.6': ('id-smime-ct-contentInfo', ),
+ '1.2.840.113549.1.9.16.1.7': ('id-smime-ct-DVCSRequestData', ),
+ '1.2.840.113549.1.9.16.1.8': ('id-smime-ct-DVCSResponseData', ),
+ '1.2.840.113549.1.9.16.1.9': ('id-smime-ct-compressedData', ),
+ '1.2.840.113549.1.9.16.1.19': ('id-smime-ct-contentCollection', ),
+ '1.2.840.113549.1.9.16.1.23': ('id-smime-ct-authEnvelopedData', ),
+ '1.2.840.113549.1.9.16.1.27': ('id-ct-asciiTextWithCRLF', ),
+ '1.2.840.113549.1.9.16.1.28': ('id-ct-xml', ),
+ '1.2.840.113549.1.9.16.2': ('id-smime-aa', ),
+ '1.2.840.113549.1.9.16.2.1': ('id-smime-aa-receiptRequest', ),
+ '1.2.840.113549.1.9.16.2.2': ('id-smime-aa-securityLabel', ),
+ '1.2.840.113549.1.9.16.2.3': ('id-smime-aa-mlExpandHistory', ),
+ '1.2.840.113549.1.9.16.2.4': ('id-smime-aa-contentHint', ),
+ '1.2.840.113549.1.9.16.2.5': ('id-smime-aa-msgSigDigest', ),
+ '1.2.840.113549.1.9.16.2.6': ('id-smime-aa-encapContentType', ),
+ '1.2.840.113549.1.9.16.2.7': ('id-smime-aa-contentIdentifier', ),
+ '1.2.840.113549.1.9.16.2.8': ('id-smime-aa-macValue', ),
+ '1.2.840.113549.1.9.16.2.9': ('id-smime-aa-equivalentLabels', ),
+ '1.2.840.113549.1.9.16.2.10': ('id-smime-aa-contentReference', ),
+ '1.2.840.113549.1.9.16.2.11': ('id-smime-aa-encrypKeyPref', ),
+ '1.2.840.113549.1.9.16.2.12': ('id-smime-aa-signingCertificate', ),
+ '1.2.840.113549.1.9.16.2.13': ('id-smime-aa-smimeEncryptCerts', ),
+ '1.2.840.113549.1.9.16.2.14': ('id-smime-aa-timeStampToken', ),
+ '1.2.840.113549.1.9.16.2.15': ('id-smime-aa-ets-sigPolicyId', ),
+ '1.2.840.113549.1.9.16.2.16': ('id-smime-aa-ets-commitmentType', ),
+ '1.2.840.113549.1.9.16.2.17': ('id-smime-aa-ets-signerLocation', ),
+ '1.2.840.113549.1.9.16.2.18': ('id-smime-aa-ets-signerAttr', ),
+ '1.2.840.113549.1.9.16.2.19': ('id-smime-aa-ets-otherSigCert', ),
+ '1.2.840.113549.1.9.16.2.20': ('id-smime-aa-ets-contentTimestamp', ),
+ '1.2.840.113549.1.9.16.2.21': ('id-smime-aa-ets-CertificateRefs', ),
+ '1.2.840.113549.1.9.16.2.22': ('id-smime-aa-ets-RevocationRefs', ),
+ '1.2.840.113549.1.9.16.2.23': ('id-smime-aa-ets-certValues', ),
+ '1.2.840.113549.1.9.16.2.24': ('id-smime-aa-ets-revocationValues', ),
+ '1.2.840.113549.1.9.16.2.25': ('id-smime-aa-ets-escTimeStamp', ),
+ '1.2.840.113549.1.9.16.2.26': ('id-smime-aa-ets-certCRLTimestamp', ),
+ '1.2.840.113549.1.9.16.2.27': ('id-smime-aa-ets-archiveTimeStamp', ),
+ '1.2.840.113549.1.9.16.2.28': ('id-smime-aa-signatureType', ),
+ '1.2.840.113549.1.9.16.2.29': ('id-smime-aa-dvcs-dvc', ),
+ '1.2.840.113549.1.9.16.2.47': ('id-smime-aa-signingCertificateV2', ),
+ '1.2.840.113549.1.9.16.3': ('id-smime-alg', ),
+ '1.2.840.113549.1.9.16.3.1': ('id-smime-alg-ESDHwith3DES', ),
+ '1.2.840.113549.1.9.16.3.2': ('id-smime-alg-ESDHwithRC2', ),
+ '1.2.840.113549.1.9.16.3.3': ('id-smime-alg-3DESwrap', ),
+ '1.2.840.113549.1.9.16.3.4': ('id-smime-alg-RC2wrap', ),
+ '1.2.840.113549.1.9.16.3.5': ('id-smime-alg-ESDH', ),
+ '1.2.840.113549.1.9.16.3.6': ('id-smime-alg-CMS3DESwrap', ),
+ '1.2.840.113549.1.9.16.3.7': ('id-smime-alg-CMSRC2wrap', ),
+ '1.2.840.113549.1.9.16.3.8': ('zlib compression', 'ZLIB'),
+ '1.2.840.113549.1.9.16.3.9': ('id-alg-PWRI-KEK', ),
+ '1.2.840.113549.1.9.16.4': ('id-smime-cd', ),
+ '1.2.840.113549.1.9.16.4.1': ('id-smime-cd-ldap', ),
+ '1.2.840.113549.1.9.16.5': ('id-smime-spq', ),
+ '1.2.840.113549.1.9.16.5.1': ('id-smime-spq-ets-sqt-uri', ),
+ '1.2.840.113549.1.9.16.5.2': ('id-smime-spq-ets-sqt-unotice', ),
+ '1.2.840.113549.1.9.16.6': ('id-smime-cti', ),
+ '1.2.840.113549.1.9.16.6.1': ('id-smime-cti-ets-proofOfOrigin', ),
+ '1.2.840.113549.1.9.16.6.2': ('id-smime-cti-ets-proofOfReceipt', ),
+ '1.2.840.113549.1.9.16.6.3': ('id-smime-cti-ets-proofOfDelivery', ),
+ '1.2.840.113549.1.9.16.6.4': ('id-smime-cti-ets-proofOfSender', ),
+ '1.2.840.113549.1.9.16.6.5': ('id-smime-cti-ets-proofOfApproval', ),
+ '1.2.840.113549.1.9.16.6.6': ('id-smime-cti-ets-proofOfCreation', ),
+ '1.2.840.113549.1.9.20': ('friendlyName', ),
+ '1.2.840.113549.1.9.21': ('localKeyID', ),
+ '1.2.840.113549.1.9.22': ('certTypes', ),
+ '1.2.840.113549.1.9.22.1': ('x509Certificate', ),
+ '1.2.840.113549.1.9.22.2': ('sdsiCertificate', ),
+ '1.2.840.113549.1.9.23': ('crlTypes', ),
+ '1.2.840.113549.1.9.23.1': ('x509Crl', ),
+ '1.2.840.113549.1.12': ('pkcs12', ),
+ '1.2.840.113549.1.12.1': ('pkcs12-pbeids', ),
+ '1.2.840.113549.1.12.1.1': ('pbeWithSHA1And128BitRC4', 'PBE-SHA1-RC4-128'),
+ '1.2.840.113549.1.12.1.2': ('pbeWithSHA1And40BitRC4', 'PBE-SHA1-RC4-40'),
+ '1.2.840.113549.1.12.1.3': ('pbeWithSHA1And3-KeyTripleDES-CBC', 'PBE-SHA1-3DES'),
+ '1.2.840.113549.1.12.1.4': ('pbeWithSHA1And2-KeyTripleDES-CBC', 'PBE-SHA1-2DES'),
+ '1.2.840.113549.1.12.1.5': ('pbeWithSHA1And128BitRC2-CBC', 'PBE-SHA1-RC2-128'),
+ '1.2.840.113549.1.12.1.6': ('pbeWithSHA1And40BitRC2-CBC', 'PBE-SHA1-RC2-40'),
+ '1.2.840.113549.1.12.10': ('pkcs12-Version1', ),
+ '1.2.840.113549.1.12.10.1': ('pkcs12-BagIds', ),
+ '1.2.840.113549.1.12.10.1.1': ('keyBag', ),
+ '1.2.840.113549.1.12.10.1.2': ('pkcs8ShroudedKeyBag', ),
+ '1.2.840.113549.1.12.10.1.3': ('certBag', ),
+ '1.2.840.113549.1.12.10.1.4': ('crlBag', ),
+ '1.2.840.113549.1.12.10.1.5': ('secretBag', ),
+ '1.2.840.113549.1.12.10.1.6': ('safeContentsBag', ),
+ '1.2.840.113549.2.2': ('md2', 'MD2'),
+ '1.2.840.113549.2.4': ('md4', 'MD4'),
+ '1.2.840.113549.2.5': ('md5', 'MD5'),
+ '1.2.840.113549.2.6': ('hmacWithMD5', ),
+ '1.2.840.113549.2.7': ('hmacWithSHA1', ),
+ '1.2.840.113549.2.8': ('hmacWithSHA224', ),
+ '1.2.840.113549.2.9': ('hmacWithSHA256', ),
+ '1.2.840.113549.2.10': ('hmacWithSHA384', ),
+ '1.2.840.113549.2.11': ('hmacWithSHA512', ),
+ '1.2.840.113549.2.12': ('hmacWithSHA512-224', ),
+ '1.2.840.113549.2.13': ('hmacWithSHA512-256', ),
+ '1.2.840.113549.3.2': ('rc2-cbc', 'RC2-CBC'),
+ '1.2.840.113549.3.4': ('rc4', 'RC4'),
+ '1.2.840.113549.3.7': ('des-ede3-cbc', 'DES-EDE3-CBC'),
+ '1.2.840.113549.3.8': ('rc5-cbc', 'RC5-CBC'),
+ '1.2.840.113549.3.10': ('des-cdmf', 'DES-CDMF'),
+ '1.3': ('identified-organization', 'org', 'ORG'),
+ '1.3.6': ('dod', 'DOD'),
+ '1.3.6.1': ('iana', 'IANA', 'internet'),
+ '1.3.6.1.1': ('Directory', 'directory'),
+ '1.3.6.1.2': ('Management', 'mgmt'),
+ '1.3.6.1.3': ('Experimental', 'experimental'),
+ '1.3.6.1.4': ('Private', 'private'),
+ '1.3.6.1.4.1': ('Enterprises', 'enterprises'),
+ '1.3.6.1.4.1.188.7.1.1.2': ('idea-cbc', 'IDEA-CBC'),
+ '1.3.6.1.4.1.311.2.1.14': ('Microsoft Extension Request', 'msExtReq'),
+ '1.3.6.1.4.1.311.2.1.21': ('Microsoft Individual Code Signing', 'msCodeInd'),
+ '1.3.6.1.4.1.311.2.1.22': ('Microsoft Commercial Code Signing', 'msCodeCom'),
+ '1.3.6.1.4.1.311.10.3.1': ('Microsoft Trust List Signing', 'msCTLSign'),
+ '1.3.6.1.4.1.311.10.3.3': ('Microsoft Server Gated Crypto', 'msSGC'),
+ '1.3.6.1.4.1.311.10.3.4': ('Microsoft Encrypted File System', 'msEFS'),
+ '1.3.6.1.4.1.311.17.1': ('Microsoft CSP Name', 'CSPName'),
+ '1.3.6.1.4.1.311.17.2': ('Microsoft Local Key set', 'LocalKeySet'),
+ '1.3.6.1.4.1.311.20.2.2': ('Microsoft Smartcardlogin', 'msSmartcardLogin'),
+ '1.3.6.1.4.1.311.20.2.3': ('Microsoft Universal Principal Name', 'msUPN'),
+ '1.3.6.1.4.1.311.60.2.1.1': ('jurisdictionLocalityName', 'jurisdictionL'),
+ '1.3.6.1.4.1.311.60.2.1.2': ('jurisdictionStateOrProvinceName', 'jurisdictionST'),
+ '1.3.6.1.4.1.311.60.2.1.3': ('jurisdictionCountryName', 'jurisdictionC'),
+ '1.3.6.1.4.1.1466.344': ('dcObject', 'dcobject'),
+ '1.3.6.1.4.1.1722.12.2.1.16': ('blake2b512', 'BLAKE2b512'),
+ '1.3.6.1.4.1.1722.12.2.2.8': ('blake2s256', 'BLAKE2s256'),
+ '1.3.6.1.4.1.3029.1.2': ('bf-cbc', 'BF-CBC'),
+ '1.3.6.1.4.1.11129.2.4.2': ('CT Precertificate SCTs', 'ct_precert_scts'),
+ '1.3.6.1.4.1.11129.2.4.3': ('CT Precertificate Poison', 'ct_precert_poison'),
+ '1.3.6.1.4.1.11129.2.4.4': ('CT Precertificate Signer', 'ct_precert_signer'),
+ '1.3.6.1.4.1.11129.2.4.5': ('CT Certificate SCTs', 'ct_cert_scts'),
+ '1.3.6.1.4.1.11591.4.11': ('scrypt', 'id-scrypt'),
+ '1.3.6.1.5': ('Security', 'security'),
+ '1.3.6.1.5.2.3': ('id-pkinit', ),
+ '1.3.6.1.5.2.3.4': ('PKINIT Client Auth', 'pkInitClientAuth'),
+ '1.3.6.1.5.2.3.5': ('Signing KDC Response', 'pkInitKDC'),
+ '1.3.6.1.5.5.7': ('PKIX', ),
+ '1.3.6.1.5.5.7.0': ('id-pkix-mod', ),
+ '1.3.6.1.5.5.7.0.1': ('id-pkix1-explicit-88', ),
+ '1.3.6.1.5.5.7.0.2': ('id-pkix1-implicit-88', ),
+ '1.3.6.1.5.5.7.0.3': ('id-pkix1-explicit-93', ),
+ '1.3.6.1.5.5.7.0.4': ('id-pkix1-implicit-93', ),
+ '1.3.6.1.5.5.7.0.5': ('id-mod-crmf', ),
+ '1.3.6.1.5.5.7.0.6': ('id-mod-cmc', ),
+ '1.3.6.1.5.5.7.0.7': ('id-mod-kea-profile-88', ),
+ '1.3.6.1.5.5.7.0.8': ('id-mod-kea-profile-93', ),
+ '1.3.6.1.5.5.7.0.9': ('id-mod-cmp', ),
+ '1.3.6.1.5.5.7.0.10': ('id-mod-qualified-cert-88', ),
+ '1.3.6.1.5.5.7.0.11': ('id-mod-qualified-cert-93', ),
+ '1.3.6.1.5.5.7.0.12': ('id-mod-attribute-cert', ),
+ '1.3.6.1.5.5.7.0.13': ('id-mod-timestamp-protocol', ),
+ '1.3.6.1.5.5.7.0.14': ('id-mod-ocsp', ),
+ '1.3.6.1.5.5.7.0.15': ('id-mod-dvcs', ),
+ '1.3.6.1.5.5.7.0.16': ('id-mod-cmp2000', ),
+ '1.3.6.1.5.5.7.1': ('id-pe', ),
+ '1.3.6.1.5.5.7.1.1': ('Authority Information Access', 'authorityInfoAccess'),
+ '1.3.6.1.5.5.7.1.2': ('Biometric Info', 'biometricInfo'),
+ '1.3.6.1.5.5.7.1.3': ('qcStatements', ),
+ '1.3.6.1.5.5.7.1.4': ('ac-auditEntity', ),
+ '1.3.6.1.5.5.7.1.5': ('ac-targeting', ),
+ '1.3.6.1.5.5.7.1.6': ('aaControls', ),
+ '1.3.6.1.5.5.7.1.7': ('sbgp-ipAddrBlock', ),
+ '1.3.6.1.5.5.7.1.8': ('sbgp-autonomousSysNum', ),
+ '1.3.6.1.5.5.7.1.9': ('sbgp-routerIdentifier', ),
+ '1.3.6.1.5.5.7.1.10': ('ac-proxying', ),
+ '1.3.6.1.5.5.7.1.11': ('Subject Information Access', 'subjectInfoAccess'),
+ '1.3.6.1.5.5.7.1.14': ('Proxy Certificate Information', 'proxyCertInfo'),
+ '1.3.6.1.5.5.7.1.24': ('TLS Feature', 'tlsfeature'),
+ '1.3.6.1.5.5.7.2': ('id-qt', ),
+ '1.3.6.1.5.5.7.2.1': ('Policy Qualifier CPS', 'id-qt-cps'),
+ '1.3.6.1.5.5.7.2.2': ('Policy Qualifier User Notice', 'id-qt-unotice'),
+ '1.3.6.1.5.5.7.2.3': ('textNotice', ),
+ '1.3.6.1.5.5.7.3': ('id-kp', ),
+ '1.3.6.1.5.5.7.3.1': ('TLS Web Server Authentication', 'serverAuth'),
+ '1.3.6.1.5.5.7.3.2': ('TLS Web Client Authentication', 'clientAuth'),
+ '1.3.6.1.5.5.7.3.3': ('Code Signing', 'codeSigning'),
+ '1.3.6.1.5.5.7.3.4': ('E-mail Protection', 'emailProtection'),
+ '1.3.6.1.5.5.7.3.5': ('IPSec End System', 'ipsecEndSystem'),
+ '1.3.6.1.5.5.7.3.6': ('IPSec Tunnel', 'ipsecTunnel'),
+ '1.3.6.1.5.5.7.3.7': ('IPSec User', 'ipsecUser'),
+ '1.3.6.1.5.5.7.3.8': ('Time Stamping', 'timeStamping'),
+ '1.3.6.1.5.5.7.3.9': ('OCSP Signing', 'OCSPSigning'),
+ '1.3.6.1.5.5.7.3.10': ('dvcs', 'DVCS'),
+ '1.3.6.1.5.5.7.3.17': ('ipsec Internet Key Exchange', 'ipsecIKE'),
+ '1.3.6.1.5.5.7.3.18': ('Ctrl/provision WAP Access', 'capwapAC'),
+ '1.3.6.1.5.5.7.3.19': ('Ctrl/Provision WAP Termination', 'capwapWTP'),
+ '1.3.6.1.5.5.7.3.21': ('SSH Client', 'secureShellClient'),
+ '1.3.6.1.5.5.7.3.22': ('SSH Server', 'secureShellServer'),
+ '1.3.6.1.5.5.7.3.23': ('Send Router', 'sendRouter'),
+ '1.3.6.1.5.5.7.3.24': ('Send Proxied Router', 'sendProxiedRouter'),
+ '1.3.6.1.5.5.7.3.25': ('Send Owner', 'sendOwner'),
+ '1.3.6.1.5.5.7.3.26': ('Send Proxied Owner', 'sendProxiedOwner'),
+ '1.3.6.1.5.5.7.3.27': ('CMC Certificate Authority', 'cmcCA'),
+ '1.3.6.1.5.5.7.3.28': ('CMC Registration Authority', 'cmcRA'),
+ '1.3.6.1.5.5.7.4': ('id-it', ),
+ '1.3.6.1.5.5.7.4.1': ('id-it-caProtEncCert', ),
+ '1.3.6.1.5.5.7.4.2': ('id-it-signKeyPairTypes', ),
+ '1.3.6.1.5.5.7.4.3': ('id-it-encKeyPairTypes', ),
+ '1.3.6.1.5.5.7.4.4': ('id-it-preferredSymmAlg', ),
+ '1.3.6.1.5.5.7.4.5': ('id-it-caKeyUpdateInfo', ),
+ '1.3.6.1.5.5.7.4.6': ('id-it-currentCRL', ),
+ '1.3.6.1.5.5.7.4.7': ('id-it-unsupportedOIDs', ),
+ '1.3.6.1.5.5.7.4.8': ('id-it-subscriptionRequest', ),
+ '1.3.6.1.5.5.7.4.9': ('id-it-subscriptionResponse', ),
+ '1.3.6.1.5.5.7.4.10': ('id-it-keyPairParamReq', ),
+ '1.3.6.1.5.5.7.4.11': ('id-it-keyPairParamRep', ),
+ '1.3.6.1.5.5.7.4.12': ('id-it-revPassphrase', ),
+ '1.3.6.1.5.5.7.4.13': ('id-it-implicitConfirm', ),
+ '1.3.6.1.5.5.7.4.14': ('id-it-confirmWaitTime', ),
+ '1.3.6.1.5.5.7.4.15': ('id-it-origPKIMessage', ),
+ '1.3.6.1.5.5.7.4.16': ('id-it-suppLangTags', ),
+ '1.3.6.1.5.5.7.5': ('id-pkip', ),
+ '1.3.6.1.5.5.7.5.1': ('id-regCtrl', ),
+ '1.3.6.1.5.5.7.5.1.1': ('id-regCtrl-regToken', ),
+ '1.3.6.1.5.5.7.5.1.2': ('id-regCtrl-authenticator', ),
+ '1.3.6.1.5.5.7.5.1.3': ('id-regCtrl-pkiPublicationInfo', ),
+ '1.3.6.1.5.5.7.5.1.4': ('id-regCtrl-pkiArchiveOptions', ),
+ '1.3.6.1.5.5.7.5.1.5': ('id-regCtrl-oldCertID', ),
+ '1.3.6.1.5.5.7.5.1.6': ('id-regCtrl-protocolEncrKey', ),
+ '1.3.6.1.5.5.7.5.2': ('id-regInfo', ),
+ '1.3.6.1.5.5.7.5.2.1': ('id-regInfo-utf8Pairs', ),
+ '1.3.6.1.5.5.7.5.2.2': ('id-regInfo-certReq', ),
+ '1.3.6.1.5.5.7.6': ('id-alg', ),
+ '1.3.6.1.5.5.7.6.1': ('id-alg-des40', ),
+ '1.3.6.1.5.5.7.6.2': ('id-alg-noSignature', ),
+ '1.3.6.1.5.5.7.6.3': ('id-alg-dh-sig-hmac-sha1', ),
+ '1.3.6.1.5.5.7.6.4': ('id-alg-dh-pop', ),
+ '1.3.6.1.5.5.7.7': ('id-cmc', ),
+ '1.3.6.1.5.5.7.7.1': ('id-cmc-statusInfo', ),
+ '1.3.6.1.5.5.7.7.2': ('id-cmc-identification', ),
+ '1.3.6.1.5.5.7.7.3': ('id-cmc-identityProof', ),
+ '1.3.6.1.5.5.7.7.4': ('id-cmc-dataReturn', ),
+ '1.3.6.1.5.5.7.7.5': ('id-cmc-transactionId', ),
+ '1.3.6.1.5.5.7.7.6': ('id-cmc-senderNonce', ),
+ '1.3.6.1.5.5.7.7.7': ('id-cmc-recipientNonce', ),
+ '1.3.6.1.5.5.7.7.8': ('id-cmc-addExtensions', ),
+ '1.3.6.1.5.5.7.7.9': ('id-cmc-encryptedPOP', ),
+ '1.3.6.1.5.5.7.7.10': ('id-cmc-decryptedPOP', ),
+ '1.3.6.1.5.5.7.7.11': ('id-cmc-lraPOPWitness', ),
+ '1.3.6.1.5.5.7.7.15': ('id-cmc-getCert', ),
+ '1.3.6.1.5.5.7.7.16': ('id-cmc-getCRL', ),
+ '1.3.6.1.5.5.7.7.17': ('id-cmc-revokeRequest', ),
+ '1.3.6.1.5.5.7.7.18': ('id-cmc-regInfo', ),
+ '1.3.6.1.5.5.7.7.19': ('id-cmc-responseInfo', ),
+ '1.3.6.1.5.5.7.7.21': ('id-cmc-queryPending', ),
+ '1.3.6.1.5.5.7.7.22': ('id-cmc-popLinkRandom', ),
+ '1.3.6.1.5.5.7.7.23': ('id-cmc-popLinkWitness', ),
+ '1.3.6.1.5.5.7.7.24': ('id-cmc-confirmCertAcceptance', ),
+ '1.3.6.1.5.5.7.8': ('id-on', ),
+ '1.3.6.1.5.5.7.8.1': ('id-on-personalData', ),
+ '1.3.6.1.5.5.7.8.3': ('Permanent Identifier', 'id-on-permanentIdentifier'),
+ '1.3.6.1.5.5.7.9': ('id-pda', ),
+ '1.3.6.1.5.5.7.9.1': ('id-pda-dateOfBirth', ),
+ '1.3.6.1.5.5.7.9.2': ('id-pda-placeOfBirth', ),
+ '1.3.6.1.5.5.7.9.3': ('id-pda-gender', ),
+ '1.3.6.1.5.5.7.9.4': ('id-pda-countryOfCitizenship', ),
+ '1.3.6.1.5.5.7.9.5': ('id-pda-countryOfResidence', ),
+ '1.3.6.1.5.5.7.10': ('id-aca', ),
+ '1.3.6.1.5.5.7.10.1': ('id-aca-authenticationInfo', ),
+ '1.3.6.1.5.5.7.10.2': ('id-aca-accessIdentity', ),
+ '1.3.6.1.5.5.7.10.3': ('id-aca-chargingIdentity', ),
+ '1.3.6.1.5.5.7.10.4': ('id-aca-group', ),
+ '1.3.6.1.5.5.7.10.5': ('id-aca-role', ),
+ '1.3.6.1.5.5.7.10.6': ('id-aca-encAttrs', ),
+ '1.3.6.1.5.5.7.11': ('id-qcs', ),
+ '1.3.6.1.5.5.7.11.1': ('id-qcs-pkixQCSyntax-v1', ),
+ '1.3.6.1.5.5.7.12': ('id-cct', ),
+ '1.3.6.1.5.5.7.12.1': ('id-cct-crs', ),
+ '1.3.6.1.5.5.7.12.2': ('id-cct-PKIData', ),
+ '1.3.6.1.5.5.7.12.3': ('id-cct-PKIResponse', ),
+ '1.3.6.1.5.5.7.21': ('id-ppl', ),
+ '1.3.6.1.5.5.7.21.0': ('Any language', 'id-ppl-anyLanguage'),
+ '1.3.6.1.5.5.7.21.1': ('Inherit all', 'id-ppl-inheritAll'),
+ '1.3.6.1.5.5.7.21.2': ('Independent', 'id-ppl-independent'),
+ '1.3.6.1.5.5.7.48': ('id-ad', ),
+ '1.3.6.1.5.5.7.48.1': ('OCSP', 'OCSP', 'id-pkix-OCSP'),
+ '1.3.6.1.5.5.7.48.1.1': ('Basic OCSP Response', 'basicOCSPResponse'),
+ '1.3.6.1.5.5.7.48.1.2': ('OCSP Nonce', 'Nonce'),
+ '1.3.6.1.5.5.7.48.1.3': ('OCSP CRL ID', 'CrlID'),
+ '1.3.6.1.5.5.7.48.1.4': ('Acceptable OCSP Responses', 'acceptableResponses'),
+ '1.3.6.1.5.5.7.48.1.5': ('OCSP No Check', 'noCheck'),
+ '1.3.6.1.5.5.7.48.1.6': ('OCSP Archive Cutoff', 'archiveCutoff'),
+ '1.3.6.1.5.5.7.48.1.7': ('OCSP Service Locator', 'serviceLocator'),
+ '1.3.6.1.5.5.7.48.1.8': ('Extended OCSP Status', 'extendedStatus'),
+ '1.3.6.1.5.5.7.48.1.9': ('valid', ),
+ '1.3.6.1.5.5.7.48.1.10': ('path', ),
+ '1.3.6.1.5.5.7.48.1.11': ('Trust Root', 'trustRoot'),
+ '1.3.6.1.5.5.7.48.2': ('CA Issuers', 'caIssuers'),
+ '1.3.6.1.5.5.7.48.3': ('AD Time Stamping', 'ad_timestamping'),
+ '1.3.6.1.5.5.7.48.4': ('ad dvcs', 'AD_DVCS'),
+ '1.3.6.1.5.5.7.48.5': ('CA Repository', 'caRepository'),
+ '1.3.6.1.5.5.8.1.1': ('hmac-md5', 'HMAC-MD5'),
+ '1.3.6.1.5.5.8.1.2': ('hmac-sha1', 'HMAC-SHA1'),
+ '1.3.6.1.6': ('SNMPv2', 'snmpv2'),
+ '1.3.6.1.7': ('Mail', ),
+ '1.3.6.1.7.1': ('MIME MHS', 'mime-mhs'),
+ '1.3.6.1.7.1.1': ('mime-mhs-headings', 'mime-mhs-headings'),
+ '1.3.6.1.7.1.1.1': ('id-hex-partial-message', 'id-hex-partial-message'),
+ '1.3.6.1.7.1.1.2': ('id-hex-multipart-message', 'id-hex-multipart-message'),
+ '1.3.6.1.7.1.2': ('mime-mhs-bodies', 'mime-mhs-bodies'),
+ '1.3.14.3.2': ('algorithm', 'algorithm'),
+ '1.3.14.3.2.3': ('md5WithRSA', 'RSA-NP-MD5'),
+ '1.3.14.3.2.6': ('des-ecb', 'DES-ECB'),
+ '1.3.14.3.2.7': ('des-cbc', 'DES-CBC'),
+ '1.3.14.3.2.8': ('des-ofb', 'DES-OFB'),
+ '1.3.14.3.2.9': ('des-cfb', 'DES-CFB'),
+ '1.3.14.3.2.11': ('rsaSignature', ),
+ '1.3.14.3.2.12': ('dsaEncryption-old', 'DSA-old'),
+ '1.3.14.3.2.13': ('dsaWithSHA', 'DSA-SHA'),
+ '1.3.14.3.2.15': ('shaWithRSAEncryption', 'RSA-SHA'),
+ '1.3.14.3.2.17': ('des-ede', 'DES-EDE'),
+ '1.3.14.3.2.18': ('sha', 'SHA'),
+ '1.3.14.3.2.26': ('sha1', 'SHA1'),
+ '1.3.14.3.2.27': ('dsaWithSHA1-old', 'DSA-SHA1-old'),
+ '1.3.14.3.2.29': ('sha1WithRSA', 'RSA-SHA1-2'),
+ '1.3.36.3.2.1': ('ripemd160', 'RIPEMD160'),
+ '1.3.36.3.3.1.2': ('ripemd160WithRSA', 'RSA-RIPEMD160'),
+ '1.3.36.3.3.2.8.1.1.1': ('brainpoolP160r1', ),
+ '1.3.36.3.3.2.8.1.1.2': ('brainpoolP160t1', ),
+ '1.3.36.3.3.2.8.1.1.3': ('brainpoolP192r1', ),
+ '1.3.36.3.3.2.8.1.1.4': ('brainpoolP192t1', ),
+ '1.3.36.3.3.2.8.1.1.5': ('brainpoolP224r1', ),
+ '1.3.36.3.3.2.8.1.1.6': ('brainpoolP224t1', ),
+ '1.3.36.3.3.2.8.1.1.7': ('brainpoolP256r1', ),
+ '1.3.36.3.3.2.8.1.1.8': ('brainpoolP256t1', ),
+ '1.3.36.3.3.2.8.1.1.9': ('brainpoolP320r1', ),
+ '1.3.36.3.3.2.8.1.1.10': ('brainpoolP320t1', ),
+ '1.3.36.3.3.2.8.1.1.11': ('brainpoolP384r1', ),
+ '1.3.36.3.3.2.8.1.1.12': ('brainpoolP384t1', ),
+ '1.3.36.3.3.2.8.1.1.13': ('brainpoolP512r1', ),
+ '1.3.36.3.3.2.8.1.1.14': ('brainpoolP512t1', ),
+ '1.3.36.8.3.3': ('Professional Information or basis for Admission', 'x509ExtAdmission'),
+ '1.3.101.1.4.1': ('Strong Extranet ID', 'SXNetID'),
+ '1.3.101.110': ('X25519', ),
+ '1.3.101.111': ('X448', ),
+ '1.3.101.112': ('ED25519', ),
+ '1.3.101.113': ('ED448', ),
+ '1.3.111': ('ieee', ),
+ '1.3.111.2.1619': ('IEEE Security in Storage Working Group', 'ieee-siswg'),
+ '1.3.111.2.1619.0.1.1': ('aes-128-xts', 'AES-128-XTS'),
+ '1.3.111.2.1619.0.1.2': ('aes-256-xts', 'AES-256-XTS'),
+ '1.3.132': ('certicom-arc', ),
+ '1.3.132.0': ('secg_ellipticCurve', ),
+ '1.3.132.0.1': ('sect163k1', ),
+ '1.3.132.0.2': ('sect163r1', ),
+ '1.3.132.0.3': ('sect239k1', ),
+ '1.3.132.0.4': ('sect113r1', ),
+ '1.3.132.0.5': ('sect113r2', ),
+ '1.3.132.0.6': ('secp112r1', ),
+ '1.3.132.0.7': ('secp112r2', ),
+ '1.3.132.0.8': ('secp160r1', ),
+ '1.3.132.0.9': ('secp160k1', ),
+ '1.3.132.0.10': ('secp256k1', ),
+ '1.3.132.0.15': ('sect163r2', ),
+ '1.3.132.0.16': ('sect283k1', ),
+ '1.3.132.0.17': ('sect283r1', ),
+ '1.3.132.0.22': ('sect131r1', ),
+ '1.3.132.0.23': ('sect131r2', ),
+ '1.3.132.0.24': ('sect193r1', ),
+ '1.3.132.0.25': ('sect193r2', ),
+ '1.3.132.0.26': ('sect233k1', ),
+ '1.3.132.0.27': ('sect233r1', ),
+ '1.3.132.0.28': ('secp128r1', ),
+ '1.3.132.0.29': ('secp128r2', ),
+ '1.3.132.0.30': ('secp160r2', ),
+ '1.3.132.0.31': ('secp192k1', ),
+ '1.3.132.0.32': ('secp224k1', ),
+ '1.3.132.0.33': ('secp224r1', ),
+ '1.3.132.0.34': ('secp384r1', ),
+ '1.3.132.0.35': ('secp521r1', ),
+ '1.3.132.0.36': ('sect409k1', ),
+ '1.3.132.0.37': ('sect409r1', ),
+ '1.3.132.0.38': ('sect571k1', ),
+ '1.3.132.0.39': ('sect571r1', ),
+ '1.3.132.1': ('secg-scheme', ),
+ '1.3.132.1.11.0': ('dhSinglePass-stdDH-sha224kdf-scheme', ),
+ '1.3.132.1.11.1': ('dhSinglePass-stdDH-sha256kdf-scheme', ),
+ '1.3.132.1.11.2': ('dhSinglePass-stdDH-sha384kdf-scheme', ),
+ '1.3.132.1.11.3': ('dhSinglePass-stdDH-sha512kdf-scheme', ),
+ '1.3.132.1.14.0': ('dhSinglePass-cofactorDH-sha224kdf-scheme', ),
+ '1.3.132.1.14.1': ('dhSinglePass-cofactorDH-sha256kdf-scheme', ),
+ '1.3.132.1.14.2': ('dhSinglePass-cofactorDH-sha384kdf-scheme', ),
+ '1.3.132.1.14.3': ('dhSinglePass-cofactorDH-sha512kdf-scheme', ),
+ '1.3.133.16.840.63.0': ('x9-63-scheme', ),
+ '1.3.133.16.840.63.0.2': ('dhSinglePass-stdDH-sha1kdf-scheme', ),
+ '1.3.133.16.840.63.0.3': ('dhSinglePass-cofactorDH-sha1kdf-scheme', ),
+ '2': ('joint-iso-itu-t', 'JOINT-ISO-ITU-T', 'joint-iso-ccitt'),
+ '2.5': ('directory services (X.500)', 'X500'),
+ '2.5.1.5': ('Selected Attribute Types', 'selected-attribute-types'),
+ '2.5.1.5.55': ('clearance', ),
+ '2.5.4': ('X509', ),
+ '2.5.4.3': ('commonName', 'CN'),
+ '2.5.4.4': ('surname', 'SN'),
+ '2.5.4.5': ('serialNumber', ),
+ '2.5.4.6': ('countryName', 'C'),
+ '2.5.4.7': ('localityName', 'L'),
+ '2.5.4.8': ('stateOrProvinceName', 'ST'),
+ '2.5.4.9': ('streetAddress', 'street'),
+ '2.5.4.10': ('organizationName', 'O'),
+ '2.5.4.11': ('organizationalUnitName', 'OU'),
+ '2.5.4.12': ('title', 'title'),
+ '2.5.4.13': ('description', ),
+ '2.5.4.14': ('searchGuide', ),
+ '2.5.4.15': ('businessCategory', ),
+ '2.5.4.16': ('postalAddress', ),
+ '2.5.4.17': ('postalCode', ),
+ '2.5.4.18': ('postOfficeBox', ),
+ '2.5.4.19': ('physicalDeliveryOfficeName', ),
+ '2.5.4.20': ('telephoneNumber', ),
+ '2.5.4.21': ('telexNumber', ),
+ '2.5.4.22': ('teletexTerminalIdentifier', ),
+ '2.5.4.23': ('facsimileTelephoneNumber', ),
+ '2.5.4.24': ('x121Address', ),
+ '2.5.4.25': ('internationaliSDNNumber', ),
+ '2.5.4.26': ('registeredAddress', ),
+ '2.5.4.27': ('destinationIndicator', ),
+ '2.5.4.28': ('preferredDeliveryMethod', ),
+ '2.5.4.29': ('presentationAddress', ),
+ '2.5.4.30': ('supportedApplicationContext', ),
+ '2.5.4.31': ('member', ),
+ '2.5.4.32': ('owner', ),
+ '2.5.4.33': ('roleOccupant', ),
+ '2.5.4.34': ('seeAlso', ),
+ '2.5.4.35': ('userPassword', ),
+ '2.5.4.36': ('userCertificate', ),
+ '2.5.4.37': ('cACertificate', ),
+ '2.5.4.38': ('authorityRevocationList', ),
+ '2.5.4.39': ('certificateRevocationList', ),
+ '2.5.4.40': ('crossCertificatePair', ),
+ '2.5.4.41': ('name', 'name'),
+ '2.5.4.42': ('givenName', 'GN'),
+ '2.5.4.43': ('initials', 'initials'),
+ '2.5.4.44': ('generationQualifier', ),
+ '2.5.4.45': ('x500UniqueIdentifier', ),
+ '2.5.4.46': ('dnQualifier', 'dnQualifier'),
+ '2.5.4.47': ('enhancedSearchGuide', ),
+ '2.5.4.48': ('protocolInformation', ),
+ '2.5.4.49': ('distinguishedName', ),
+ '2.5.4.50': ('uniqueMember', ),
+ '2.5.4.51': ('houseIdentifier', ),
+ '2.5.4.52': ('supportedAlgorithms', ),
+ '2.5.4.53': ('deltaRevocationList', ),
+ '2.5.4.54': ('dmdName', ),
+ '2.5.4.65': ('pseudonym', ),
+ '2.5.4.72': ('role', 'role'),
+ '2.5.4.97': ('organizationIdentifier', ),
+ '2.5.4.98': ('countryCode3c', 'c3'),
+ '2.5.4.99': ('countryCode3n', 'n3'),
+ '2.5.4.100': ('dnsName', ),
+ '2.5.8': ('directory services - algorithms', 'X500algorithms'),
+ '2.5.8.1.1': ('rsa', 'RSA'),
+ '2.5.8.3.100': ('mdc2WithRSA', 'RSA-MDC2'),
+ '2.5.8.3.101': ('mdc2', 'MDC2'),
+ '2.5.29': ('id-ce', ),
+ '2.5.29.9': ('X509v3 Subject Directory Attributes', 'subjectDirectoryAttributes'),
+ '2.5.29.14': ('X509v3 Subject Key Identifier', 'subjectKeyIdentifier'),
+ '2.5.29.15': ('X509v3 Key Usage', 'keyUsage'),
+ '2.5.29.16': ('X509v3 Private Key Usage Period', 'privateKeyUsagePeriod'),
+ '2.5.29.17': ('X509v3 Subject Alternative Name', 'subjectAltName'),
+ '2.5.29.18': ('X509v3 Issuer Alternative Name', 'issuerAltName'),
+ '2.5.29.19': ('X509v3 Basic Constraints', 'basicConstraints'),
+ '2.5.29.20': ('X509v3 CRL Number', 'crlNumber'),
+ '2.5.29.21': ('X509v3 CRL Reason Code', 'CRLReason'),
+ '2.5.29.23': ('Hold Instruction Code', 'holdInstructionCode'),
+ '2.5.29.24': ('Invalidity Date', 'invalidityDate'),
+ '2.5.29.27': ('X509v3 Delta CRL Indicator', 'deltaCRL'),
+ '2.5.29.28': ('X509v3 Issuing Distribution Point', 'issuingDistributionPoint'),
+ '2.5.29.29': ('X509v3 Certificate Issuer', 'certificateIssuer'),
+ '2.5.29.30': ('X509v3 Name Constraints', 'nameConstraints'),
+ '2.5.29.31': ('X509v3 CRL Distribution Points', 'crlDistributionPoints'),
+ '2.5.29.32': ('X509v3 Certificate Policies', 'certificatePolicies'),
+ '2.5.29.32.0': ('X509v3 Any Policy', 'anyPolicy'),
+ '2.5.29.33': ('X509v3 Policy Mappings', 'policyMappings'),
+ '2.5.29.35': ('X509v3 Authority Key Identifier', 'authorityKeyIdentifier'),
+ '2.5.29.36': ('X509v3 Policy Constraints', 'policyConstraints'),
+ '2.5.29.37': ('X509v3 Extended Key Usage', 'extendedKeyUsage'),
+ '2.5.29.37.0': ('Any Extended Key Usage', 'anyExtendedKeyUsage'),
+ '2.5.29.46': ('X509v3 Freshest CRL', 'freshestCRL'),
+ '2.5.29.54': ('X509v3 Inhibit Any Policy', 'inhibitAnyPolicy'),
+ '2.5.29.55': ('X509v3 AC Targeting', 'targetInformation'),
+ '2.5.29.56': ('X509v3 No Revocation Available', 'noRevAvail'),
+ '2.16.840.1.101.3': ('csor', ),
+ '2.16.840.1.101.3.4': ('nistAlgorithms', ),
+ '2.16.840.1.101.3.4.1': ('aes', ),
+ '2.16.840.1.101.3.4.1.1': ('aes-128-ecb', 'AES-128-ECB'),
+ '2.16.840.1.101.3.4.1.2': ('aes-128-cbc', 'AES-128-CBC'),
+ '2.16.840.1.101.3.4.1.3': ('aes-128-ofb', 'AES-128-OFB'),
+ '2.16.840.1.101.3.4.1.4': ('aes-128-cfb', 'AES-128-CFB'),
+ '2.16.840.1.101.3.4.1.5': ('id-aes128-wrap', ),
+ '2.16.840.1.101.3.4.1.6': ('aes-128-gcm', 'id-aes128-GCM'),
+ '2.16.840.1.101.3.4.1.7': ('aes-128-ccm', 'id-aes128-CCM'),
+ '2.16.840.1.101.3.4.1.8': ('id-aes128-wrap-pad', ),
+ '2.16.840.1.101.3.4.1.21': ('aes-192-ecb', 'AES-192-ECB'),
+ '2.16.840.1.101.3.4.1.22': ('aes-192-cbc', 'AES-192-CBC'),
+ '2.16.840.1.101.3.4.1.23': ('aes-192-ofb', 'AES-192-OFB'),
+ '2.16.840.1.101.3.4.1.24': ('aes-192-cfb', 'AES-192-CFB'),
+ '2.16.840.1.101.3.4.1.25': ('id-aes192-wrap', ),
+ '2.16.840.1.101.3.4.1.26': ('aes-192-gcm', 'id-aes192-GCM'),
+ '2.16.840.1.101.3.4.1.27': ('aes-192-ccm', 'id-aes192-CCM'),
+ '2.16.840.1.101.3.4.1.28': ('id-aes192-wrap-pad', ),
+ '2.16.840.1.101.3.4.1.41': ('aes-256-ecb', 'AES-256-ECB'),
+ '2.16.840.1.101.3.4.1.42': ('aes-256-cbc', 'AES-256-CBC'),
+ '2.16.840.1.101.3.4.1.43': ('aes-256-ofb', 'AES-256-OFB'),
+ '2.16.840.1.101.3.4.1.44': ('aes-256-cfb', 'AES-256-CFB'),
+ '2.16.840.1.101.3.4.1.45': ('id-aes256-wrap', ),
+ '2.16.840.1.101.3.4.1.46': ('aes-256-gcm', 'id-aes256-GCM'),
+ '2.16.840.1.101.3.4.1.47': ('aes-256-ccm', 'id-aes256-CCM'),
+ '2.16.840.1.101.3.4.1.48': ('id-aes256-wrap-pad', ),
+ '2.16.840.1.101.3.4.2': ('nist_hashalgs', ),
+ '2.16.840.1.101.3.4.2.1': ('sha256', 'SHA256'),
+ '2.16.840.1.101.3.4.2.2': ('sha384', 'SHA384'),
+ '2.16.840.1.101.3.4.2.3': ('sha512', 'SHA512'),
+ '2.16.840.1.101.3.4.2.4': ('sha224', 'SHA224'),
+ '2.16.840.1.101.3.4.2.5': ('sha512-224', 'SHA512-224'),
+ '2.16.840.1.101.3.4.2.6': ('sha512-256', 'SHA512-256'),
+ '2.16.840.1.101.3.4.2.7': ('sha3-224', 'SHA3-224'),
+ '2.16.840.1.101.3.4.2.8': ('sha3-256', 'SHA3-256'),
+ '2.16.840.1.101.3.4.2.9': ('sha3-384', 'SHA3-384'),
+ '2.16.840.1.101.3.4.2.10': ('sha3-512', 'SHA3-512'),
+ '2.16.840.1.101.3.4.2.11': ('shake128', 'SHAKE128'),
+ '2.16.840.1.101.3.4.2.12': ('shake256', 'SHAKE256'),
+ '2.16.840.1.101.3.4.2.13': ('hmac-sha3-224', 'id-hmacWithSHA3-224'),
+ '2.16.840.1.101.3.4.2.14': ('hmac-sha3-256', 'id-hmacWithSHA3-256'),
+ '2.16.840.1.101.3.4.2.15': ('hmac-sha3-384', 'id-hmacWithSHA3-384'),
+ '2.16.840.1.101.3.4.2.16': ('hmac-sha3-512', 'id-hmacWithSHA3-512'),
+ '2.16.840.1.101.3.4.3': ('dsa_with_sha2', 'sigAlgs'),
+ '2.16.840.1.101.3.4.3.1': ('dsa_with_SHA224', ),
+ '2.16.840.1.101.3.4.3.2': ('dsa_with_SHA256', ),
+ '2.16.840.1.101.3.4.3.3': ('dsa_with_SHA384', 'id-dsa-with-sha384'),
+ '2.16.840.1.101.3.4.3.4': ('dsa_with_SHA512', 'id-dsa-with-sha512'),
+ '2.16.840.1.101.3.4.3.5': ('dsa_with_SHA3-224', 'id-dsa-with-sha3-224'),
+ '2.16.840.1.101.3.4.3.6': ('dsa_with_SHA3-256', 'id-dsa-with-sha3-256'),
+ '2.16.840.1.101.3.4.3.7': ('dsa_with_SHA3-384', 'id-dsa-with-sha3-384'),
+ '2.16.840.1.101.3.4.3.8': ('dsa_with_SHA3-512', 'id-dsa-with-sha3-512'),
+ '2.16.840.1.101.3.4.3.9': ('ecdsa_with_SHA3-224', 'id-ecdsa-with-sha3-224'),
+ '2.16.840.1.101.3.4.3.10': ('ecdsa_with_SHA3-256', 'id-ecdsa-with-sha3-256'),
+ '2.16.840.1.101.3.4.3.11': ('ecdsa_with_SHA3-384', 'id-ecdsa-with-sha3-384'),
+ '2.16.840.1.101.3.4.3.12': ('ecdsa_with_SHA3-512', 'id-ecdsa-with-sha3-512'),
+ '2.16.840.1.101.3.4.3.13': ('RSA-SHA3-224', 'id-rsassa-pkcs1-v1_5-with-sha3-224'),
+ '2.16.840.1.101.3.4.3.14': ('RSA-SHA3-256', 'id-rsassa-pkcs1-v1_5-with-sha3-256'),
+ '2.16.840.1.101.3.4.3.15': ('RSA-SHA3-384', 'id-rsassa-pkcs1-v1_5-with-sha3-384'),
+ '2.16.840.1.101.3.4.3.16': ('RSA-SHA3-512', 'id-rsassa-pkcs1-v1_5-with-sha3-512'),
+ '2.16.840.1.113730': ('Netscape Communications Corp.', 'Netscape'),
+ '2.16.840.1.113730.1': ('Netscape Certificate Extension', 'nsCertExt'),
+ '2.16.840.1.113730.1.1': ('Netscape Cert Type', 'nsCertType'),
+ '2.16.840.1.113730.1.2': ('Netscape Base Url', 'nsBaseUrl'),
+ '2.16.840.1.113730.1.3': ('Netscape Revocation Url', 'nsRevocationUrl'),
+ '2.16.840.1.113730.1.4': ('Netscape CA Revocation Url', 'nsCaRevocationUrl'),
+ '2.16.840.1.113730.1.7': ('Netscape Renewal Url', 'nsRenewalUrl'),
+ '2.16.840.1.113730.1.8': ('Netscape CA Policy Url', 'nsCaPolicyUrl'),
+ '2.16.840.1.113730.1.12': ('Netscape SSL Server Name', 'nsSslServerName'),
+ '2.16.840.1.113730.1.13': ('Netscape Comment', 'nsComment'),
+ '2.16.840.1.113730.2': ('Netscape Data Type', 'nsDataType'),
+ '2.16.840.1.113730.2.5': ('Netscape Certificate Sequence', 'nsCertSequence'),
+ '2.16.840.1.113730.4.1': ('Netscape Server Gated Crypto', 'nsSGC'),
+ '2.23': ('International Organizations', 'international-organizations'),
+ '2.23.42': ('Secure Electronic Transactions', 'id-set'),
+ '2.23.42.0': ('content types', 'set-ctype'),
+ '2.23.42.0.0': ('setct-PANData', ),
+ '2.23.42.0.1': ('setct-PANToken', ),
+ '2.23.42.0.2': ('setct-PANOnly', ),
+ '2.23.42.0.3': ('setct-OIData', ),
+ '2.23.42.0.4': ('setct-PI', ),
+ '2.23.42.0.5': ('setct-PIData', ),
+ '2.23.42.0.6': ('setct-PIDataUnsigned', ),
+ '2.23.42.0.7': ('setct-HODInput', ),
+ '2.23.42.0.8': ('setct-AuthResBaggage', ),
+ '2.23.42.0.9': ('setct-AuthRevReqBaggage', ),
+ '2.23.42.0.10': ('setct-AuthRevResBaggage', ),
+ '2.23.42.0.11': ('setct-CapTokenSeq', ),
+ '2.23.42.0.12': ('setct-PInitResData', ),
+ '2.23.42.0.13': ('setct-PI-TBS', ),
+ '2.23.42.0.14': ('setct-PResData', ),
+ '2.23.42.0.16': ('setct-AuthReqTBS', ),
+ '2.23.42.0.17': ('setct-AuthResTBS', ),
+ '2.23.42.0.18': ('setct-AuthResTBSX', ),
+ '2.23.42.0.19': ('setct-AuthTokenTBS', ),
+ '2.23.42.0.20': ('setct-CapTokenData', ),
+ '2.23.42.0.21': ('setct-CapTokenTBS', ),
+ '2.23.42.0.22': ('setct-AcqCardCodeMsg', ),
+ '2.23.42.0.23': ('setct-AuthRevReqTBS', ),
+ '2.23.42.0.24': ('setct-AuthRevResData', ),
+ '2.23.42.0.25': ('setct-AuthRevResTBS', ),
+ '2.23.42.0.26': ('setct-CapReqTBS', ),
+ '2.23.42.0.27': ('setct-CapReqTBSX', ),
+ '2.23.42.0.28': ('setct-CapResData', ),
+ '2.23.42.0.29': ('setct-CapRevReqTBS', ),
+ '2.23.42.0.30': ('setct-CapRevReqTBSX', ),
+ '2.23.42.0.31': ('setct-CapRevResData', ),
+ '2.23.42.0.32': ('setct-CredReqTBS', ),
+ '2.23.42.0.33': ('setct-CredReqTBSX', ),
+ '2.23.42.0.34': ('setct-CredResData', ),
+ '2.23.42.0.35': ('setct-CredRevReqTBS', ),
+ '2.23.42.0.36': ('setct-CredRevReqTBSX', ),
+ '2.23.42.0.37': ('setct-CredRevResData', ),
+ '2.23.42.0.38': ('setct-PCertReqData', ),
+ '2.23.42.0.39': ('setct-PCertResTBS', ),
+ '2.23.42.0.40': ('setct-BatchAdminReqData', ),
+ '2.23.42.0.41': ('setct-BatchAdminResData', ),
+ '2.23.42.0.42': ('setct-CardCInitResTBS', ),
+ '2.23.42.0.43': ('setct-MeAqCInitResTBS', ),
+ '2.23.42.0.44': ('setct-RegFormResTBS', ),
+ '2.23.42.0.45': ('setct-CertReqData', ),
+ '2.23.42.0.46': ('setct-CertReqTBS', ),
+ '2.23.42.0.47': ('setct-CertResData', ),
+ '2.23.42.0.48': ('setct-CertInqReqTBS', ),
+ '2.23.42.0.49': ('setct-ErrorTBS', ),
+ '2.23.42.0.50': ('setct-PIDualSignedTBE', ),
+ '2.23.42.0.51': ('setct-PIUnsignedTBE', ),
+ '2.23.42.0.52': ('setct-AuthReqTBE', ),
+ '2.23.42.0.53': ('setct-AuthResTBE', ),
+ '2.23.42.0.54': ('setct-AuthResTBEX', ),
+ '2.23.42.0.55': ('setct-AuthTokenTBE', ),
+ '2.23.42.0.56': ('setct-CapTokenTBE', ),
+ '2.23.42.0.57': ('setct-CapTokenTBEX', ),
+ '2.23.42.0.58': ('setct-AcqCardCodeMsgTBE', ),
+ '2.23.42.0.59': ('setct-AuthRevReqTBE', ),
+ '2.23.42.0.60': ('setct-AuthRevResTBE', ),
+ '2.23.42.0.61': ('setct-AuthRevResTBEB', ),
+ '2.23.42.0.62': ('setct-CapReqTBE', ),
+ '2.23.42.0.63': ('setct-CapReqTBEX', ),
+ '2.23.42.0.64': ('setct-CapResTBE', ),
+ '2.23.42.0.65': ('setct-CapRevReqTBE', ),
+ '2.23.42.0.66': ('setct-CapRevReqTBEX', ),
+ '2.23.42.0.67': ('setct-CapRevResTBE', ),
+ '2.23.42.0.68': ('setct-CredReqTBE', ),
+ '2.23.42.0.69': ('setct-CredReqTBEX', ),
+ '2.23.42.0.70': ('setct-CredResTBE', ),
+ '2.23.42.0.71': ('setct-CredRevReqTBE', ),
+ '2.23.42.0.72': ('setct-CredRevReqTBEX', ),
+ '2.23.42.0.73': ('setct-CredRevResTBE', ),
+ '2.23.42.0.74': ('setct-BatchAdminReqTBE', ),
+ '2.23.42.0.75': ('setct-BatchAdminResTBE', ),
+ '2.23.42.0.76': ('setct-RegFormReqTBE', ),
+ '2.23.42.0.77': ('setct-CertReqTBE', ),
+ '2.23.42.0.78': ('setct-CertReqTBEX', ),
+ '2.23.42.0.79': ('setct-CertResTBE', ),
+ '2.23.42.0.80': ('setct-CRLNotificationTBS', ),
+ '2.23.42.0.81': ('setct-CRLNotificationResTBS', ),
+ '2.23.42.0.82': ('setct-BCIDistributionTBS', ),
+ '2.23.42.1': ('message extensions', 'set-msgExt'),
+ '2.23.42.1.1': ('generic cryptogram', 'setext-genCrypt'),
+ '2.23.42.1.3': ('merchant initiated auth', 'setext-miAuth'),
+ '2.23.42.1.4': ('setext-pinSecure', ),
+ '2.23.42.1.5': ('setext-pinAny', ),
+ '2.23.42.1.7': ('setext-track2', ),
+ '2.23.42.1.8': ('additional verification', 'setext-cv'),
+ '2.23.42.3': ('set-attr', ),
+ '2.23.42.3.0': ('setAttr-Cert', ),
+ '2.23.42.3.0.0': ('set-rootKeyThumb', ),
+ '2.23.42.3.0.1': ('set-addPolicy', ),
+ '2.23.42.3.1': ('payment gateway capabilities', 'setAttr-PGWYcap'),
+ '2.23.42.3.2': ('setAttr-TokenType', ),
+ '2.23.42.3.2.1': ('setAttr-Token-EMV', ),
+ '2.23.42.3.2.2': ('setAttr-Token-B0Prime', ),
+ '2.23.42.3.3': ('issuer capabilities', 'setAttr-IssCap'),
+ '2.23.42.3.3.3': ('setAttr-IssCap-CVM', ),
+ '2.23.42.3.3.3.1': ('generate cryptogram', 'setAttr-GenCryptgrm'),
+ '2.23.42.3.3.4': ('setAttr-IssCap-T2', ),
+ '2.23.42.3.3.4.1': ('encrypted track 2', 'setAttr-T2Enc'),
+ '2.23.42.3.3.4.2': ('cleartext track 2', 'setAttr-T2cleartxt'),
+ '2.23.42.3.3.5': ('setAttr-IssCap-Sig', ),
+ '2.23.42.3.3.5.1': ('ICC or token signature', 'setAttr-TokICCsig'),
+ '2.23.42.3.3.5.2': ('secure device signature', 'setAttr-SecDevSig'),
+ '2.23.42.5': ('set-policy', ),
+ '2.23.42.5.0': ('set-policy-root', ),
+ '2.23.42.7': ('certificate extensions', 'set-certExt'),
+ '2.23.42.7.0': ('setCext-hashedRoot', ),
+ '2.23.42.7.1': ('setCext-certType', ),
+ '2.23.42.7.2': ('setCext-merchData', ),
+ '2.23.42.7.3': ('setCext-cCertRequired', ),
+ '2.23.42.7.4': ('setCext-tunneling', ),
+ '2.23.42.7.5': ('setCext-setExt', ),
+ '2.23.42.7.6': ('setCext-setQualf', ),
+ '2.23.42.7.7': ('setCext-PGWYcapabilities', ),
+ '2.23.42.7.8': ('setCext-TokenIdentifier', ),
+ '2.23.42.7.9': ('setCext-Track2Data', ),
+ '2.23.42.7.10': ('setCext-TokenType', ),
+ '2.23.42.7.11': ('setCext-IssuerCapabilities', ),
+ '2.23.42.8': ('set-brand', ),
+ '2.23.42.8.1': ('set-brand-IATA-ATA', ),
+ '2.23.42.8.4': ('set-brand-Visa', ),
+ '2.23.42.8.5': ('set-brand-MasterCard', ),
+ '2.23.42.8.30': ('set-brand-Diners', ),
+ '2.23.42.8.34': ('set-brand-AmericanExpress', ),
+ '2.23.42.8.35': ('set-brand-JCB', ),
+ '2.23.42.8.6011': ('set-brand-Novus', ),
+ '2.23.43': ('wap', ),
+ '2.23.43.1': ('wap-wsg', ),
+ '2.23.43.1.4': ('wap-wsg-idm-ecid', ),
+ '2.23.43.1.4.1': ('wap-wsg-idm-ecid-wtls1', ),
+ '2.23.43.1.4.3': ('wap-wsg-idm-ecid-wtls3', ),
+ '2.23.43.1.4.4': ('wap-wsg-idm-ecid-wtls4', ),
+ '2.23.43.1.4.5': ('wap-wsg-idm-ecid-wtls5', ),
+ '2.23.43.1.4.6': ('wap-wsg-idm-ecid-wtls6', ),
+ '2.23.43.1.4.7': ('wap-wsg-idm-ecid-wtls7', ),
+ '2.23.43.1.4.8': ('wap-wsg-idm-ecid-wtls8', ),
+ '2.23.43.1.4.9': ('wap-wsg-idm-ecid-wtls9', ),
+ '2.23.43.1.4.10': ('wap-wsg-idm-ecid-wtls10', ),
+ '2.23.43.1.4.11': ('wap-wsg-idm-ecid-wtls11', ),
+ '2.23.43.1.4.12': ('wap-wsg-idm-ecid-wtls12', ),
+}
+# #####################################################################################
+# #####################################################################################
+
+_OID_LOOKUP = dict()
+_NORMALIZE_NAMES = dict()
+_NORMALIZE_NAMES_SHORT = dict()
+
+for dotted, names in _OID_MAP.items():
+ for name in names:
+ if name in _NORMALIZE_NAMES and _OID_LOOKUP[name] != dotted:
+ raise AssertionError(
+ 'Name collision during setup: "{0}" for OIDs {1} and {2}'
+ .format(name, dotted, _OID_LOOKUP[name])
+ )
+ _NORMALIZE_NAMES[name] = names[0]
+ _NORMALIZE_NAMES_SHORT[name] = names[-1]
+ _OID_LOOKUP[name] = dotted
+for alias, original in [('userID', 'userId')]:
+ if alias in _NORMALIZE_NAMES:
+ raise AssertionError(
+ 'Name collision during adding aliases: "{0}" (alias for "{1}") is already mapped to OID {2}'
+ .format(alias, original, _OID_LOOKUP[alias])
+ )
+ _NORMALIZE_NAMES[alias] = original
+ _NORMALIZE_NAMES_SHORT[alias] = _NORMALIZE_NAMES_SHORT[original]
+ _OID_LOOKUP[alias] = _OID_LOOKUP[original]
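+
+# Doctest-style illustration of the lookup tables built above, using the
+# commonName entry ('2.5.4.3': ('commonName', 'CN')) from _OID_MAP:
+#
+#     >>> _OID_LOOKUP['CN']
+#     '2.5.4.3'
+#     >>> _NORMALIZE_NAMES['CN']
+#     'commonName'
+#     >>> _NORMALIZE_NAMES_SHORT['commonName']
+#     'CN'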
+
+
+def pyopenssl_normalize_name(name, short=False):
+ nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(name))
+ if nid != 0:
+ b_name = OpenSSL._util.lib.OBJ_nid2ln(nid)
+ name = to_text(OpenSSL._util.ffi.string(b_name))
+ if short:
+ return _NORMALIZE_NAMES_SHORT.get(name, name)
+ else:
+ return _NORMALIZE_NAMES.get(name, name)
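+
+# For example, assuming the linked OpenSSL recognizes the name, 'CN' is first
+# expanded to its long form via OBJ_txt2nid/OBJ_nid2ln and then normalized:
+#
+#     >>> pyopenssl_normalize_name('CN')
+#     'commonName'
+#     >>> pyopenssl_normalize_name('commonName', short=True)
+#     'CN'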
+
+
+# #####################################################################################
+# #####################################################################################
+# # This excerpt is dual licensed under the terms of the Apache License, Version
+# # 2.0, and the BSD License. See the LICENSE file at
+# # https://github.com/pyca/cryptography/blob/master/LICENSE for complete details.
+# #
+# # Adapted from cryptography's hazmat/backends/openssl/decode_asn1.py
+# #
+# # Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
+# # Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
+# #
+# # Relevant commits from cryptography project (https://github.com/pyca/cryptography):
+# # pyca/cryptography@719d536dd691e84e208534798f2eb4f82aaa2e07
+# # pyca/cryptography@5ab6d6a5c05572bd1c75f05baf264a2d0001894a
+# # pyca/cryptography@2e776e20eb60378e0af9b7439000d0e80da7c7e3
+# # pyca/cryptography@fb309ed24647d1be9e319b61b1f2aa8ebb87b90b
+# # pyca/cryptography@2917e460993c475c72d7146c50dc3bbc2414280d
+# # pyca/cryptography@3057f91ea9a05fb593825006d87a391286a4d828
+# # pyca/cryptography@d607dd7e5bc5c08854ec0c9baff70ba4a35be36f
+def _obj2txt(openssl_lib, openssl_ffi, obj):
+ # Set to 80 on the recommendation of
+ # https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values
+ #
+ # But OIDs longer than this occur in real life (e.g. Active
+ # Directory makes some very long OIDs). So we need to detect
+ # and properly handle the case where the default buffer is not
+ # big enough.
+ #
+ buf_len = 80
+ buf = openssl_ffi.new("char[]", buf_len)
+
+ # 'res' is the number of bytes that *would* be written if the
+ # buffer is large enough. If 'res' > buf_len - 1, we need to
+ # alloc a big-enough buffer and go again.
+ res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1)
+ if res > buf_len - 1: # account for terminating null byte
+ buf_len = res + 1
+ buf = openssl_ffi.new("char[]", buf_len)
+ res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1)
+ return openssl_ffi.buffer(buf, res)[:].decode()
+# #####################################################################################
+# #####################################################################################
+
+
+def cryptography_get_extensions_from_cert(cert):
+ # Since cryptography won't give us the DER value for an extension
+ # (that is only stored for unrecognized extensions), we have to re-do
+ # the extension parsing ourselves.
+ result = dict()
+ backend = cert._backend
+ x509_obj = cert._x509
+
+ for i in range(backend._lib.X509_get_ext_count(x509_obj)):
+ ext = backend._lib.X509_get_ext(x509_obj, i)
+ if ext == backend._ffi.NULL:
+ continue
+ crit = backend._lib.X509_EXTENSION_get_critical(ext)
+ data = backend._lib.X509_EXTENSION_get_data(ext)
+ backend.openssl_assert(data != backend._ffi.NULL)
+ der = backend._ffi.buffer(data.data, data.length)[:]
+ entry = dict(
+ critical=(crit == 1),
+ value=base64.b64encode(der),
+ )
+ oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext))
+ result[oid] = entry
+ return result
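+
+# Sketch of the returned structure (values illustrative): dotted OID strings
+# mapped to criticality and the base64-encoded DER value of the extension,
+# e.g. for a basicConstraints extension with CA:TRUE, pathlen:0:
+#
+#     {'2.5.29.19': {'critical': True, 'value': b'MAYBAf8CAQA='}}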
+
+
+def cryptography_get_extensions_from_csr(csr):
+ # Since cryptography won't give us the DER value for an extension
+ # (that is only stored for unrecognized extensions), we have to re-do
+ # the extension parsing ourselves.
+ result = dict()
+ backend = csr._backend
+
+ extensions = backend._lib.X509_REQ_get_extensions(csr._x509_req)
+ extensions = backend._ffi.gc(
+ extensions,
+ lambda ext: backend._lib.sk_X509_EXTENSION_pop_free(
+ ext,
+ backend._ffi.addressof(backend._lib._original_lib, "X509_EXTENSION_free")
+ )
+ )
+
+ for i in range(backend._lib.sk_X509_EXTENSION_num(extensions)):
+ ext = backend._lib.sk_X509_EXTENSION_value(extensions, i)
+ if ext == backend._ffi.NULL:
+ continue
+ crit = backend._lib.X509_EXTENSION_get_critical(ext)
+ data = backend._lib.X509_EXTENSION_get_data(ext)
+ backend.openssl_assert(data != backend._ffi.NULL)
+ der = backend._ffi.buffer(data.data, data.length)[:]
+ entry = dict(
+ critical=(crit == 1),
+ value=base64.b64encode(der),
+ )
+ oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext))
+ result[oid] = entry
+ return result
+
+
+def pyopenssl_get_extensions_from_cert(cert):
+ # While pyOpenSSL allows us to get an extension's DER value, it won't
+ # give us the dotted string for an OID. So we have to do some magic to
+ # get hold of it.
+ result = dict()
+ ext_count = cert.get_extension_count()
+ for i in range(0, ext_count):
+ ext = cert.get_extension(i)
+ entry = dict(
+ critical=bool(ext.get_critical()),
+ value=base64.b64encode(ext.get_data()),
+ )
+ oid = _obj2txt(
+ OpenSSL._util.lib,
+ OpenSSL._util.ffi,
+ OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension)
+ )
+ # This could also be done a bit simpler:
+ #
+ # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid))
+ #
+ # Unfortunately this gives the wrong result in case the linked OpenSSL
+ # doesn't know the OID. That's why we have to get the OID dotted string
+ # similarly to how cryptography does it.
+ result[oid] = entry
+ return result
+
+
+def pyopenssl_get_extensions_from_csr(csr):
+ # While pyOpenSSL allows us to get an extension's DER value, it won't
+ # give us the dotted string for an OID. So we have to do some magic to
+ # get hold of it.
+ result = dict()
+ for ext in csr.get_extensions():
+ entry = dict(
+ critical=bool(ext.get_critical()),
+ value=base64.b64encode(ext.get_data()),
+ )
+ oid = _obj2txt(
+ OpenSSL._util.lib,
+ OpenSSL._util.ffi,
+ OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension)
+ )
+ # This could also be done a bit simpler:
+ #
+ # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid))
+ #
+ # Unfortunately this gives the wrong result in case the linked OpenSSL
+ # doesn't know the OID. That's why we have to get the OID dotted string
+ # similarly to how cryptography does it.
+ result[oid] = entry
+ return result
+
+
+def cryptography_name_to_oid(name):
+ dotted = _OID_LOOKUP.get(name)
+ if dotted is None:
+ raise OpenSSLObjectError('Cannot find OID for "{0}"'.format(name))
+ return x509.oid.ObjectIdentifier(dotted)
+
+
+def cryptography_oid_to_name(oid, short=False):
+ dotted_string = oid.dotted_string
+ names = _OID_MAP.get(dotted_string)
+ name = names[0] if names else oid._name
+ if short:
+ return _NORMALIZE_NAMES_SHORT.get(name, name)
+ else:
+ return _NORMALIZE_NAMES.get(name, name)
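+
+# Round-trip sketch using the tables above (reprs as printed by recent
+# cryptography releases):
+#
+#     >>> cryptography_name_to_oid('CN')
+#     <ObjectIdentifier(oid=2.5.4.3, name=commonName)>
+#     >>> cryptography_oid_to_name(x509.oid.ObjectIdentifier('2.5.4.3'), short=True)
+#     'CN'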
+
+
+def cryptography_get_name(name):
+ '''
+ Given a Subject Alternative Name string (e.g. 'DNS:example.com'), returns the
+ corresponding cryptography x509.GeneralName object (such as x509.DNSName).
+ Raises an OpenSSLObjectError if the name is unknown or cannot be parsed.
+ '''
+ try:
+ if name.startswith('DNS:'):
+ return x509.DNSName(to_text(name[4:]))
+ if name.startswith('IP:'):
+ return x509.IPAddress(ipaddress.ip_address(to_text(name[3:])))
+ if name.startswith('email:'):
+ return x509.RFC822Name(to_text(name[6:]))
+ if name.startswith('URI:'):
+ return x509.UniformResourceIdentifier(to_text(name[4:]))
+ except Exception as e:
+ raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}": {1}'.format(name, e))
+ if ':' not in name:
+ raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (forgot "DNS:" prefix?)'.format(name))
+ raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (potentially unsupported by cryptography backend)'.format(name))
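+
+# For example (reprs abbreviated; the exact form depends on the cryptography
+# version):
+#
+#     >>> cryptography_get_name('DNS:www.example.com')
+#     <DNSName(value='www.example.com')>
+#     >>> cryptography_get_name('IP:192.0.2.1')
+#     <IPAddress(value=192.0.2.1)>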
+
+
+def _get_hex(bytesstr):
+ if bytesstr is None:
+ return bytesstr
+ data = binascii.hexlify(bytesstr)
+ data = to_text(b':'.join(data[i:i + 2] for i in range(0, len(data), 2)))
+ return data
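+
+# Example:
+#
+#     >>> _get_hex(b'\x01\xab')
+#     '01:ab'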
+
+
+def cryptography_decode_name(name):
+ '''
+ Given a cryptography x509.GeneralName object, returns its string representation.
+ Raises an OpenSSLObjectError if the name is not supported.
+ '''
+ if isinstance(name, x509.DNSName):
+ return 'DNS:{0}'.format(name.value)
+ if isinstance(name, x509.IPAddress):
+ return 'IP:{0}'.format(name.value.compressed)
+ if isinstance(name, x509.RFC822Name):
+ return 'email:{0}'.format(name.value)
+ if isinstance(name, x509.UniformResourceIdentifier):
+ return 'URI:{0}'.format(name.value)
+ if isinstance(name, x509.DirectoryName):
+ # FIXME: test
+ return 'DirName:' + ''.join(['/{0}:{1}'.format(attribute.oid._name, attribute.value) for attribute in name.value])
+ if isinstance(name, x509.RegisteredID):
+ # FIXME: test
+ return 'RegisteredID:{0}'.format(name.value)
+ if isinstance(name, x509.OtherName):
+ # FIXME: test
+ return '{0}:{1}'.format(name.type_id.dotted_string, _get_hex(name.value))
+ raise OpenSSLObjectError('Cannot decode name "{0}"'.format(name))
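+
+# This is the inverse of cryptography_get_name() above, e.g.:
+#
+#     >>> cryptography_decode_name(x509.DNSName(u'www.example.com'))
+#     'DNS:www.example.com'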
+
+
+def _cryptography_get_keyusage(usage):
+ '''
+ Given a key usage identifier string, returns the parameter name used by cryptography's x509.KeyUsage().
+ Raises an OpenSSLObjectError if the identifier is unknown.
+ '''
+ if usage in ('Digital Signature', 'digitalSignature'):
+ return 'digital_signature'
+ if usage in ('Non Repudiation', 'nonRepudiation'):
+ return 'content_commitment'
+ if usage in ('Key Encipherment', 'keyEncipherment'):
+ return 'key_encipherment'
+ if usage in ('Data Encipherment', 'dataEncipherment'):
+ return 'data_encipherment'
+ if usage in ('Key Agreement', 'keyAgreement'):
+ return 'key_agreement'
+ if usage in ('Certificate Sign', 'keyCertSign'):
+ return 'key_cert_sign'
+ if usage in ('CRL Sign', 'cRLSign'):
+ return 'crl_sign'
+ if usage in ('Encipher Only', 'encipherOnly'):
+ return 'encipher_only'
+ if usage in ('Decipher Only', 'decipherOnly'):
+ return 'decipher_only'
+ raise OpenSSLObjectError('Unknown key usage "{0}"'.format(usage))
+
+
+def cryptography_parse_key_usage_params(usages):
+ '''
+ Given a list of key usage identifier strings, returns the parameters for cryptography's x509.KeyUsage().
+ Raises an OpenSSLObjectError if an identifier is unknown.
+ '''
+ params = dict(
+ digital_signature=False,
+ content_commitment=False,
+ key_encipherment=False,
+ data_encipherment=False,
+ key_agreement=False,
+ key_cert_sign=False,
+ crl_sign=False,
+ encipher_only=False,
+ decipher_only=False,
+ )
+ for usage in usages:
+ params[_cryptography_get_keyusage(usage)] = True
+ return params
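+
+# The returned dict can be passed directly to cryptography's constructor; a
+# minimal sketch:
+#
+#     params = cryptography_parse_key_usage_params(['digitalSignature', 'keyEncipherment'])
+#     usage = x509.KeyUsage(**params)  # digital_signature and key_encipherment are True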
+
+
+def cryptography_get_basic_constraints(constraints):
+ '''
+ Given a list of constraints, returns a tuple (ca, path_length).
+ Raises an OpenSSLObjectError if a constraint is unknown or cannot be parsed.
+ '''
+ ca = False
+ path_length = None
+ if constraints:
+ for constraint in constraints:
+ if constraint.startswith('CA:'):
+ if constraint == 'CA:TRUE':
+ ca = True
+ elif constraint == 'CA:FALSE':
+ ca = False
+ else:
+ raise OpenSSLObjectError('Unknown basic constraint value "{0}" for CA'.format(constraint[3:]))
+ elif constraint.startswith('pathlen:'):
+ v = constraint[len('pathlen:'):]
+ try:
+ path_length = int(v)
+ except Exception as e:
+ raise OpenSSLObjectError('Cannot parse path length constraint "{0}" ({1})'.format(v, e))
+ else:
+ raise OpenSSLObjectError('Unknown basic constraint "{0}"'.format(constraint))
+ return ca, path_length
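+
+# Example:
+#
+#     >>> cryptography_get_basic_constraints(['CA:TRUE', 'pathlen:1'])
+#     (True, 1)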
+
+
+def binary_exp_mod(f, e, m):
+ '''Computes f^e mod m in O(log e) multiplications modulo m.'''
+ # Compute len_e = floor(log_2(e))
+ len_e = -1
+ x = e
+ while x > 0:
+ x >>= 1
+ len_e += 1
+ # Compute f**e mod m
+ result = 1
+ for k in range(len_e, -1, -1):
+ result = (result * result) % m
+ if ((e >> k) & 1) != 0:
+ result = (result * f) % m
+ return result
+
+
+def simple_gcd(a, b):
+ '''Compute GCD of its two inputs.'''
+ while b != 0:
+ a, b = b, a % b
+ return a
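+
+# Doctest-style checks for the two helpers above:
+#
+#     >>> binary_exp_mod(3, 13, 7)    # same result as pow(3, 13, 7)
+#     3
+#     >>> simple_gcd(12, 18)
+#     6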
+
+
+def quick_is_not_prime(n):
+ '''Does some quick checks to see if we can poke a hole into the primality of n.
+
+ A result of `False` does **not** mean that the number is prime; it just means
+ that we couldn't detect quickly whether it is not prime.
+ '''
+ if n <= 2:
+ return True
+ # The constant in the next line is the product of all primes < 200.
+ # Note that this test also flags small primes themselves, since any
+ # prime < 200 divides the product; callers are expected to pass large
+ # candidates (e.g. RSA prime factors).
+ if simple_gcd(n, 7799922041683461553249199106329813876687996789903550945093032474868511536164700810) > 1:
+ return True
+ # TODO: maybe do some iterations of Miller-Rabin to increase confidence
+ # (https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test)
+ return False
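+
+# Examples (the asymmetry matters: True is definite, False is inconclusive):
+#
+#     >>> quick_is_not_prime(91)       # 7 * 13, caught by the gcd test
+#     True
+#     >>> quick_is_not_prime(65537)    # prime, so no small factor is found
+#     False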
+
+
+python_version = (sys.version_info[0], sys.version_info[1])
+if (2, 7) <= python_version < (3, 0) or python_version >= (3, 1):
+ # int.bit_length() is available on Python 2.7+ and 3.1+; Ansible still
+ # supports Python 2.6 on remote nodes
+ def count_bits(no):
+ no = abs(no)
+ if no == 0:
+ return 0
+ return no.bit_length()
+else:
+ # Slow, but works
+ def count_bits(no):
+ no = abs(no)
+ count = 0
+ while no > 0:
+ no >>= 1
+ count += 1
+ return count
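+
+# Both variants agree, e.g.:
+#
+#     >>> count_bits(255)
+#     8
+#     >>> count_bits(256)
+#     9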
+
+
+PEM_START = '-----BEGIN '
+PEM_END = '-----'
+PKCS8_PRIVATEKEY_NAMES = ('PRIVATE KEY', 'ENCRYPTED PRIVATE KEY')
+PKCS1_PRIVATEKEY_SUFFIX = ' PRIVATE KEY'
+
+
+def identify_private_key_format(content):
+ '''Given the contents of a private key file, identifies its format.'''
+ # See https://github.com/openssl/openssl/blob/master/crypto/pem/pem_pkey.c#L40-L85
+ # (PEM_read_bio_PrivateKey)
+ # and https://github.com/openssl/openssl/blob/master/include/openssl/pem.h#L46-L47
+ # (PEM_STRING_PKCS8, PEM_STRING_PKCS8INF)
+ try:
+ lines = content.decode('utf-8').splitlines(False)
+ if lines[0].startswith(PEM_START) and lines[0].endswith(PEM_END) and len(lines[0]) > len(PEM_START) + len(PEM_END):
+ name = lines[0][len(PEM_START):-len(PEM_END)]
+ if name in PKCS8_PRIVATEKEY_NAMES:
+ return 'pkcs8'
+ if len(name) > len(PKCS1_PRIVATEKEY_SUFFIX) and name.endswith(PKCS1_PRIVATEKEY_SUFFIX):
+ return 'pkcs1'
+ return 'unknown-pem'
+ except UnicodeDecodeError:
+ pass
+ return 'raw'
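+
+# For example (header lines only; the rest of the body is irrelevant to detection):
+#
+#     >>> identify_private_key_format(b'-----BEGIN RSA PRIVATE KEY-----\n')
+#     'pkcs1'
+#     >>> identify_private_key_format(b'-----BEGIN ENCRYPTED PRIVATE KEY-----\n')
+#     'pkcs8'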
+
+
+def cryptography_key_needs_digest_for_signing(key):
+ '''Tests whether the given private key requires a digest algorithm for signing.
+
+ Ed25519 and Ed448 keys do not; they need None to be passed as the digest algorithm.
+ '''
+ if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
+ return False
+ if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
+ return False
+ return True
+
+
+def cryptography_compare_public_keys(key1, key2):
+ '''Tests whether two public keys are the same.
+
+ Needs special logic for Ed25519 and Ed448 keys, since they do not have public_numbers().
+ '''
+ if CRYPTOGRAPHY_HAS_ED25519:
+ a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey)
+ b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey)
+ if a or b:
+ if not a or not b:
+ return False
+ a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ return a == b
+ if CRYPTOGRAPHY_HAS_ED448:
+ a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey)
+ b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey)
+ if a or b:
+ if not a or not b:
+ return False
+ a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ return a == b
+ return key1.public_numbers() == key2.public_numbers()
+
+
+if HAS_CRYPTOGRAPHY:
+ REVOCATION_REASON_MAP = {
+ 'unspecified': x509.ReasonFlags.unspecified,
+ 'key_compromise': x509.ReasonFlags.key_compromise,
+ 'ca_compromise': x509.ReasonFlags.ca_compromise,
+ 'affiliation_changed': x509.ReasonFlags.affiliation_changed,
+ 'superseded': x509.ReasonFlags.superseded,
+ 'cessation_of_operation': x509.ReasonFlags.cessation_of_operation,
+ 'certificate_hold': x509.ReasonFlags.certificate_hold,
+ 'privilege_withdrawn': x509.ReasonFlags.privilege_withdrawn,
+ 'aa_compromise': x509.ReasonFlags.aa_compromise,
+ 'remove_from_crl': x509.ReasonFlags.remove_from_crl,
+ }
+ REVOCATION_REASON_MAP_INVERSE = dict()
+ for k, v in REVOCATION_REASON_MAP.items():
+ REVOCATION_REASON_MAP_INVERSE[v] = k
+
+
+def cryptography_decode_revoked_certificate(cert):
+ result = {
+ 'serial_number': cert.serial_number,
+ 'revocation_date': cert.revocation_date,
+ 'issuer': None,
+ 'issuer_critical': False,
+ 'reason': None,
+ 'reason_critical': False,
+ 'invalidity_date': None,
+ 'invalidity_date_critical': False,
+ }
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.CertificateIssuer)
+ result['issuer'] = list(ext.value)
+ result['issuer_critical'] = ext.critical
+ except x509.ExtensionNotFound:
+ pass
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.CRLReason)
+ result['reason'] = ext.value.reason
+ result['reason_critical'] = ext.critical
+ except x509.ExtensionNotFound:
+ pass
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.InvalidityDate)
+ result['invalidity_date'] = ext.value.invalidity_date
+ result['invalidity_date_critical'] = ext.critical
+ except x509.ExtensionNotFound:
+ pass
+ return result
diff --git a/test/support/integration/plugins/module_utils/database.py b/test/support/integration/plugins/module_utils/database.py
new file mode 100644
index 00000000..014939a2
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/database.py
@@ -0,0 +1,142 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class SQLParseError(Exception):
+ pass
+
+
+class UnclosedQuoteError(SQLParseError):
+ pass
+
+
+# maps a type of identifier to the maximum number of dot levels that are
+# allowed to specify that identifier. For example, a database column can be
+# specified by up to 4 levels: database.schema.table.column
+_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
+ database=1,
+ schema=2,
+ table=3,
+ column=4,
+ role=1,
+ tablespace=1,
+ sequence=3,
+ publication=1,
+)
+_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
+
+
+def _find_end_quote(identifier, quote_char):
+ accumulate = 0
+ while True:
+ try:
+ quote = identifier.index(quote_char)
+ except ValueError:
+ raise UnclosedQuoteError
+ accumulate = accumulate + quote
+ try:
+ next_char = identifier[quote + 1]
+ except IndexError:
+ return accumulate
+ if next_char == quote_char:
+ try:
+ identifier = identifier[quote + 2:]
+ accumulate = accumulate + 2
+ except IndexError:
+ raise UnclosedQuoteError
+ else:
+ return accumulate
+
+
+def _identifier_parse(identifier, quote_char):
+ if not identifier:
+ raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
+
+ already_quoted = False
+ if identifier.startswith(quote_char):
+ already_quoted = True
+ try:
+ end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
+ except UnclosedQuoteError:
+ already_quoted = False
+ else:
+ if end_quote < len(identifier) - 1:
+ if identifier[end_quote + 1] == '.':
+ dot = end_quote + 1
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ further_identifiers.insert(0, first_identifier)
+ else:
+ raise SQLParseError('User escaped identifiers must escape extra quotes')
+ else:
+ further_identifiers = [identifier]
+
+ if not already_quoted:
+ try:
+ dot = identifier.index('.')
+ except ValueError:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ if dot == 0 or dot >= len(identifier) - 1:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ first_identifier = first_identifier.replace(quote_char, quote_char * 2)
+ first_identifier = ''.join((quote_char, first_identifier, quote_char))
+ further_identifiers.insert(0, first_identifier)
+
+ return further_identifiers
+
+
+def pg_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='"')
+ if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+ return '.'.join(identifier_fragments)
+
+
+def mysql_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='`')
+ if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+
+ special_cased_fragments = []
+ for fragment in identifier_fragments:
+ if fragment == '`*`':
+ special_cased_fragments.append('*')
+ else:
+ special_cased_fragments.append(fragment)
+
+ return '.'.join(special_cased_fragments)
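+
+# Doctest-style examples for the two public helpers (identifier names are
+# illustrative):
+#
+#     >>> pg_quote_identifier('public.order', 'table')
+#     '"public"."order"'
+#     >>> mysql_quote_identifier('mydb.*', 'table')
+#     '`mydb`.*'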
diff --git a/test/support/integration/plugins/module_utils/docker/__init__.py b/test/support/integration/plugins/module_utils/docker/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/docker/__init__.py
diff --git a/test/support/integration/plugins/module_utils/docker/common.py b/test/support/integration/plugins/module_utils/docker/common.py
new file mode 100644
index 00000000..03307250
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/docker/common.py
@@ -0,0 +1,1022 @@
+#
+# Copyright 2016 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import platform
+import re
+import sys
+from datetime import timedelta
+from distutils.version import LooseVersion
+
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_PY_2 = False
+HAS_DOCKER_PY_3 = False
+HAS_DOCKER_ERROR = None
+
+try:
+ from requests.exceptions import SSLError
+ from docker import __version__ as docker_version
+ from docker.errors import APIError, NotFound, TLSParameterError
+ from docker.tls import TLSConfig
+ from docker import auth
+
+ if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+ HAS_DOCKER_PY_3 = True
+ from docker import APIClient as Client
+ elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ HAS_DOCKER_PY_2 = True
+ from docker import APIClient as Client
+ else:
+ from docker import Client
+
+except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_PY = False
+
+
+# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
+# to ensure the user does not have both ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace and are incompatible
+try:
+ # docker (Docker SDK for Python >= 2.0.0)
+ import docker.models # noqa: F401
+ HAS_DOCKER_MODELS = True
+except ImportError:
+ HAS_DOCKER_MODELS = False
+
+try:
+ # docker-py (Docker SDK for Python < 2.0.0)
+ import docker.ssladapter # noqa: F401
+ HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+ HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+ from requests.exceptions import RequestException
+except ImportError:
+ # Either docker-py no longer uses requests, docker-py is not installed
+ # at all, or its requests dependency is missing. In any case, define a
+ # RequestException class so that our code doesn't break.
+ class RequestException(Exception):
+ pass
+
+
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = 'localhost'
+MIN_DOCKER_VERSION = "1.8.0"
+DEFAULT_TIMEOUT_SECONDS = 60
+
+DOCKER_COMMON_ARGS = dict(
+ docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
+ tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
+ api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
+ ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
+ client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
+ client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
+ ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
+ tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
+ validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
+ debug=dict(type='bool', default=False)
+)
+
+DOCKER_MUTUALLY_EXCLUSIVE = []
+
+DOCKER_REQUIRED_TOGETHER = [
+ ['client_cert', 'client_key']
+]
+
+DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
+EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
+BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+
+if not HAS_DOCKER_PY:
+ docker_version = None
+
+ # No Docker SDK for Python. Create a placeholder client to allow
+ # instantiation of AnsibleModule and proper error handling.
+ class Client(object): # noqa: F811
+ def __init__(self, **kwargs):
+ pass
+
+ class APIError(Exception): # noqa: F811
+ pass
+
+ class NotFound(Exception): # noqa: F811
+ pass
+
+
+def is_image_name_id(name):
+ """Check whether the given image name is in fact an image ID (hash)."""
+ if re.match('^sha256:[0-9a-fA-F]{64}$', name):
+ return True
+ return False
+
+
+def is_valid_tag(tag, allow_empty=False):
+ """Check whether the given string is a valid docker tag name."""
+ if not tag:
+ return allow_empty
+ # See here ("Extended description") for a definition what tags can be:
+ # https://docs.docker.com/engine/reference/commandline/tag/
+ return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
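+
+# Examples:
+#
+#     >>> is_image_name_id('sha256:' + 64 * 'f')
+#     True
+#     >>> is_valid_tag('v1.0.3')
+#     True
+#     >>> is_valid_tag('-oops')    # tags must start with a letter, digit or underscore
+#     False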
+
+
+def sanitize_result(data):
+ """Sanitize data object for return to Ansible.
+
+ When the data object contains types such as docker.types.containers.HostConfig,
+ Ansible will fail when these are returned via exit_json or fail_json.
+ HostConfig is derived from dict, but its constructor requires additional
+ arguments. This function sanitizes data structures by recursively converting
+ everything derived from dict to dict and everything derived from list (and tuple)
+ to a list.
+ """
+ if isinstance(data, dict):
+ return dict((k, sanitize_result(v)) for k, v in data.items())
+ elif isinstance(data, (list, tuple)):
+ return [sanitize_result(v) for v in data]
+ else:
+ return data
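+
+# For example, tuples become lists and dict subclasses become plain dicts:
+#
+#     >>> sanitize_result({'Mounts': ({'Name': 'data'},)})
+#     {'Mounts': [{'Name': 'data'}]}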
+
+
+class DockerBaseClass(object):
+
+ def __init__(self):
+ self.debug = False
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+
+def update_tls_hostname(result):
+ if result['tls_hostname'] is None:
+ # get default machine name from the url
+ parsed_url = urlparse(result['docker_host'])
+ if ':' in parsed_url.netloc:
+ result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+ else:
+ result['tls_hostname'] = parsed_url.netloc
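+
+# A quick sketch of the derivation (host and port are illustrative):
+#
+#     >>> result = {'tls_hostname': None, 'docker_host': 'tcp://192.0.2.10:2376'}
+#     >>> update_tls_hostname(result)
+#     >>> result['tls_hostname']
+#     '192.0.2.10'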
+
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
+ if auth['tls'] or auth['tls_verify']:
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and host verification
+ if auth['cacert_path']:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ ca_cert=auth['cacert_path'],
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ else:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cacert_path']:
+ # TLS with cacert only
+ tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+ assert_hostname=auth['tls_hostname'],
+ verify=True,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify']:
+ # TLS with verify and no certs
+ tls_config = _get_tls_config(verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and no host verification
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls']:
+ # TLS with no certs and no host verification
+ tls_config = _get_tls_config(verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ # No TLS
+ return dict(base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+
+DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
+DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
+DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
+ "Hint: if you do not need Python 2.6 support, try "
+ "`pip uninstall docker-py` instead, followed by `pip install docker`.")
+
+
+class AnsibleDockerClient(Client):
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
+ min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if)
+
+ NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
+
+ self.docker_py_version = LooseVersion(docker_version)
+
+ if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
+ self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
+ "SDK for Python) installed together as they use the same namespace and cause a corrupt "
+ "installation. Please uninstall both packages, and re-install only the docker-py or docker "
+ "python module (for %s's Python %s). It is recommended to install the docker module if no "
+ "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
+ "can leave the other module in a broken state." % (platform.node(), sys.executable))
+
+ if not HAS_DOCKER_PY:
+ if NEEDS_DOCKER_PY2:
+ msg = missing_required_lib("Docker SDK for Python: docker")
+ msg = msg + ", for example via `pip install docker`. The error was: %s"
+ else:
+ msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
+ msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
+ self.fail(msg % HAS_DOCKER_ERROR)
+
+ if self.docker_py_version < LooseVersion(min_docker_version):
+ msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
+ if not NEEDS_DOCKER_PY2:
+ # The minimal required version is < 2.0 (and the current version as well).
+ # Advertise docker (instead of docker-py) for non-Python-2.6 users.
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif docker_version < LooseVersion('2.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClient, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.version()['ApiVersion']
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ if min_docker_api_version is not None:
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ def fail(self, msg, **kwargs):
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value):
+ if param_value is not None:
+ # take module parameter value
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return env_value
+
+ # take the default
+ return default_value
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+ # Precedence: module parameters -> environment variables -> defaults.
+
+ self.log('Getting credentials')
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = self.module.params.get(key)
+
+ if self.module.params.get('use_tls'):
+ # support use_tls option in docker_image.py. This will be deprecated.
+ use_tls = self.module.params.get('use_tls')
+ if use_tls == 'encrypt':
+ params['tls'] = True
+ if use_tls == 'verify':
+ params['validate_certs'] = True
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
+ tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS),
+ )
+
+ update_tls_hostname(result)
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_py = True
+ support_docker_api = True
+ if 'docker_py_version' in data:
+ support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_py and support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ elif not support_docker_py:
+ msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
+ if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif self.docker_py_version < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def get_container_by_id(self, container_id):
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.inspect_container(container=container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ for container in self.containers(all=True):
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ for network in self.networks():
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.inspect_network(network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+            # In API <= 1.20, images pulled from Docker Hub are named 'docker.io/<name>'
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+ # If docker.io is explicitly there in name, the image
+ # isn't found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+ # Last case: if docker.io wasn't there, it can be that
+ # the image wasn't found either (#15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ inspection = self.inspect_image(images[0]['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+ return inspection
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ inspection = self.inspect_image(image_id)
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ return inspection
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+ '''
+ try:
+ response = self.images(name=name)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ images = response
+ if tag:
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def pull_image(self, name, tag="latest"):
+ '''
+ Pull an image
+ '''
+ self.log("Pulling image %s:%s" % (name, tag))
+ old_tag = self.find_image(name, tag)
+ try:
+ for line in self.pull(name, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
+ return new_tag, old_tag == new_tag
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+ if isinstance(result, Sequence):
+ for warning in result:
+ self.module.warn('Docker warning: {0}'.format(warning))
+ elif isinstance(result, string_types) and result:
+ self.module.warn('Docker warning: {0}'.format(result))
+
+ def inspect_distribution(self, image, **kwargs):
+ '''
+ Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
+ since prior versions did not support accessing private repositories.
+ '''
+ if self.docker_py_version < LooseVersion('4.0.0'):
+ registry = auth.resolve_repository_name(image)[0]
+ header = auth.get_config_header(self, registry)
+ if header:
+ return self._result(self._get(
+ self._url('/distribution/{0}/json', image),
+ headers={'X-Registry-Auth': header}
+ ), json=True)
+ return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs)
+
+
+def compare_dict_allow_more_present(av, bv):
+ '''
+ Compare two dictionaries for whether every entry of the first is in the second.
+ '''
+ for key, value in av.items():
+ if key not in bv:
+ return False
+ if bv[key] != value:
+ return False
+ return True
+
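+# A quick illustration (editor's sketch, not part of the original module):
+#
+#     compare_dict_allow_more_present({'a': 1}, {'a': 1, 'b': 2})   # -> True
+#     compare_dict_allow_more_present({'a': 1, 'c': 3}, {'a': 1})   # -> False ('c' is missing)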
+
+def compare_generic(a, b, method, datatype):
+ '''
+ Compare values a and b as described by method and datatype.
+
+ Returns ``True`` if the values compare equal, and ``False`` if not.
+
+ ``a`` is usually the module's parameter, while ``b`` is a property
+ of the current object. ``a`` must not be ``None`` (except for
+ ``datatype == 'value'``).
+
+ Valid values for ``method`` are:
+ - ``ignore`` (always compare as equal);
+ - ``strict`` (only compare if really equal)
+ - ``allow_more_present`` (allow b to have elements which a does not have).
+
+ Valid values for ``datatype`` are:
+ - ``value``: for simple values (strings, numbers, ...);
+ - ``list``: for ``list``s or ``tuple``s where order matters;
+ - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
+ matter;
+ - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
+ not matter and which contain ``dict``s; ``allow_more_present`` is used
+ for the ``dict``s, and these are assumed to be dictionaries of values;
+ - ``dict``: for dictionaries of values.
+ '''
+ if method == 'ignore':
+ return True
+ # If a or b is None:
+ if a is None or b is None:
+ # If both are None: equality
+ if a == b:
+ return True
+ # Otherwise, not equal for values, and equal
+ # if the other is empty for set/list/dict
+ if datatype == 'value':
+ return False
+ # For allow_more_present, allow a to be None
+ if method == 'allow_more_present' and a is None:
+ return True
+ # Otherwise, the iterable object which is not None must have length 0
+ return len(b if a is None else a) == 0
+ # Do proper comparison (both objects not None)
+ if datatype == 'value':
+ return a == b
+ elif datatype == 'list':
+ if method == 'strict':
+ return a == b
+ else:
+ i = 0
+ for v in a:
+ while i < len(b) and b[i] != v:
+ i += 1
+ if i == len(b):
+ return False
+ i += 1
+ return True
+ elif datatype == 'dict':
+ if method == 'strict':
+ return a == b
+ else:
+ return compare_dict_allow_more_present(a, b)
+ elif datatype == 'set':
+ set_a = set(a)
+ set_b = set(b)
+ if method == 'strict':
+ return set_a == set_b
+ else:
+ return set_b >= set_a
+ elif datatype == 'set(dict)':
+ for av in a:
+ found = False
+ for bv in b:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ if method == 'strict':
+ # If we would know that both a and b do not contain duplicates,
+ # we could simply compare len(a) to len(b) to finish this test.
+ # We can assume that b has no duplicates (as it is returned by
+ # docker), but we don't know for a.
+ for bv in b:
+ found = False
+ for av in a:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ return True
+
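+# Behaviour sketch (editor's example, hedged):
+#
+#     compare_generic([1, 2], [0, 1, 3, 2], 'allow_more_present', 'list')  # -> True (ordered subsequence)
+#     compare_generic([1, 2], [2, 1], 'strict', 'set')                     # -> True (order ignored)
+#     compare_generic([1, 2], [2, 1], 'strict', 'list')                    # -> False (order matters)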
+
+class DifferenceTracker(object):
+ def __init__(self):
+ self._diff = []
+
+ def add(self, name, parameter=None, active=None):
+ self._diff.append(dict(
+ name=name,
+ parameter=parameter,
+ active=active,
+ ))
+
+ def merge(self, other_tracker):
+ self._diff.extend(other_tracker._diff)
+
+ @property
+ def empty(self):
+ return len(self._diff) == 0
+
+ def get_before_after(self):
+ '''
+ Return texts ``before`` and ``after``.
+ '''
+ before = dict()
+ after = dict()
+ for item in self._diff:
+ before[item['name']] = item['active']
+ after[item['name']] = item['parameter']
+ return before, after
+
+ def has_difference_for(self, name):
+ '''
+        Returns whether a difference exists for the given name.
+ '''
+ return any(diff for diff in self._diff if diff['name'] == name)
+
+ def get_legacy_docker_container_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = []
+ for entry in self._diff:
+ item = dict()
+ item[entry['name']] = dict(
+ parameter=entry['parameter'],
+ container=entry['active'],
+ )
+ result.append(item)
+ return result
+
+ def get_legacy_docker_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = [entry['name'] for entry in self._diff]
+ return result
+
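+# Typical DifferenceTracker usage (illustrative sketch by the editor):
+#
+#     tracker = DifferenceTracker()
+#     tracker.add('image', parameter='nginx:1.19', active='nginx:1.18')
+#     tracker.empty               # -> False
+#     tracker.get_before_after()  # -> ({'image': 'nginx:1.18'}, {'image': 'nginx:1.19'})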
+
+def clean_dict_booleans_for_docker_api(data):
+ '''
+ Go doesn't like Python booleans 'True' or 'False', while Ansible is just
+ fine with them in YAML. As such, they need to be converted in cases where
+ we pass dictionaries to the Docker API (e.g. docker_network's
+ driver_options and docker_prune's filters).
+ '''
+ result = dict()
+ if data is not None:
+ for k, v in data.items():
+ if v is True:
+ v = 'true'
+ elif v is False:
+ v = 'false'
+ else:
+ v = str(v)
+ result[str(k)] = v
+ return result
+
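+# For example (editor's illustration):
+#
+#     clean_dict_booleans_for_docker_api({'attachable': True, 'mtu': 1450})
+#     # -> {'attachable': 'true', 'mtu': '1450'}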
+
+def convert_duration_to_nanosecond(time_str):
+ """
+    Return the time duration in nanoseconds.
+ """
+ if not isinstance(time_str, str):
+ raise ValueError('Missing unit in duration - %s' % time_str)
+
+ regex = re.compile(
+ r'^(((?P<hours>\d+)h)?'
+ r'((?P<minutes>\d+)m(?!s))?'
+ r'((?P<seconds>\d+)s)?'
+ r'((?P<milliseconds>\d+)ms)?'
+ r'((?P<microseconds>\d+)us)?)$'
+ )
+ parts = regex.match(time_str)
+
+ if not parts:
+ raise ValueError('Invalid time duration - %s' % time_str)
+
+ parts = parts.groupdict()
+ time_params = {}
+ for (name, value) in parts.items():
+ if value:
+ time_params[name] = int(value)
+
+ delta = timedelta(**time_params)
+ time_in_nanoseconds = (
+ delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
+ ) * 10 ** 3
+
+ return time_in_nanoseconds
+
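+# Sample conversions (editor's sketch):
+#
+#     convert_duration_to_nanosecond('1h30m')  # -> 5400000000000 (90 minutes)
+#     convert_duration_to_nanosecond('500ms')  # -> 500000000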
+
+def parse_healthcheck(healthcheck):
+ """
+    Return a dictionary of healthcheck parameters, and a boolean indicating
+    whether the healthcheck defined in the image was requested to be disabled.
+ """
+ if (not healthcheck) or (not healthcheck.get('test')):
+ return None, None
+
+ result = dict()
+
+ # All supported healthcheck parameters
+ options = dict(
+ test='test',
+ interval='interval',
+ timeout='timeout',
+ start_period='start_period',
+ retries='retries'
+ )
+
+ duration_options = ['interval', 'timeout', 'start_period']
+
+ for (key, value) in options.items():
+ if value in healthcheck:
+ if healthcheck.get(value) is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value in duration_options:
+ time = convert_duration_to_nanosecond(healthcheck.get(value))
+ if time:
+ result[key] = time
+ elif healthcheck.get(value):
+ result[key] = healthcheck.get(value)
+ if key == 'test':
+ if isinstance(result[key], (tuple, list)):
+ result[key] = [str(e) for e in result[key]]
+ else:
+ result[key] = ['CMD-SHELL', str(result[key])]
+ elif key == 'retries':
+ try:
+ result[key] = int(result[key])
+ except ValueError:
+ raise ValueError(
+ 'Cannot parse number of retries for healthcheck. '
+ 'Expected an integer, got "{0}".'.format(result[key])
+ )
+
+ if result['test'] == ['NONE']:
+ # If the user explicitly disables the healthcheck, return None
+ # as the healthcheck object, and set disable_healthcheck to True
+ return None, True
+
+ return result, False
+
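+# Sketch of both return shapes (editor's example, hedged):
+#
+#     parse_healthcheck({'test': ['CMD', 'true'], 'interval': '30s', 'retries': 3})
+#     # -> ({'test': ['CMD', 'true'], 'interval': 30000000000, 'retries': 3}, False)
+#     parse_healthcheck({'test': ['NONE']})
+#     # -> (None, True)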
+
+def omit_none_from_dict(d):
+ """
+ Return a copy of the dictionary with all keys with value None omitted.
+ """
+ return dict((k, v) for (k, v) in d.items() if v is not None)
diff --git a/test/support/integration/plugins/module_utils/docker/swarm.py b/test/support/integration/plugins/module_utils/docker/swarm.py
new file mode 100644
index 00000000..55d94db0
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/docker/swarm.py
@@ -0,0 +1,280 @@
+# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
+# (c) Thierry Bouvet (@tbouvet)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+from time import sleep
+
+try:
+ from docker.errors import APIError, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.docker.common import (
+ AnsibleDockerClient,
+ LooseVersion,
+)
+
+
+class AnsibleDockerSwarmClient(AnsibleDockerClient):
+
+ def __init__(self, **kwargs):
+ super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
+
+ def get_swarm_node_id(self):
+ """
+        Get the 'NodeID' of the Swarm node, or 'None' if the host is not part of a Swarm.
+        The NodeID returned is that of the Docker host the module is executed on.
+        :return:
+            NodeID of the host, or 'None' if it is not part of a Swarm
+ """
+
+ try:
+ info = self.info()
+ except APIError as exc:
+ self.fail("Failed to get node information for %s" % to_native(exc))
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return swarm_info['Swarm']['NodeID']
+ return None
+
+ def check_if_swarm_node(self, node_id=None):
+ """
+        Check whether the host is part of a Docker Swarm. If 'node_id' is not provided, the Docker host's
+        system information is read and checked for the relevant Swarm key. If 'node_id' is provided, the
+        node information is read, assuming the module runs on a Swarm manager. The get_node_inspect()
+        method handles the exception raised when it is not executed on a Swarm manager.
+
+ :param node_id: Node identifier
+ :return:
+ bool: True if node is part of Swarm, False otherwise
+ """
+
+ if node_id is None:
+ try:
+ info = self.info()
+ except APIError:
+ self.fail("Failed to get host information.")
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return True
+ if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
+ return True
+ return False
+ else:
+ try:
+ node_info = self.get_node_inspect(node_id=node_id)
+ except APIError:
+                # Return False (not None) to match the documented bool return value
+                return False
+
+ if node_info['ID'] is not None:
+ return True
+ return False
+
+ def check_if_swarm_manager(self):
+ """
+        Checks whether the node's role is set to Manager in the Swarm. The node is the Docker host on
+        which the module action is performed. inspect_swarm() will fail if the node is not a manager.
+
+ :return: True if node is Swarm Manager, False otherwise
+ """
+
+ try:
+ self.inspect_swarm()
+ return True
+ except APIError:
+ return False
+
+ def fail_task_if_not_swarm_manager(self):
+ """
+ If host is not a swarm manager then Ansible task on this host should end with 'failed' state
+ """
+ if not self.check_if_swarm_manager():
+ self.fail("Error running docker swarm module: must run on swarm manager node")
+
+ def check_if_swarm_worker(self):
+ """
+        Checks whether the node's role is set to Worker in the Swarm. The node is the Docker host on
+        which the module action is performed. Will fail, via check_if_swarm_node(), if run on a host
+        that is not part of a Swarm.
+
+ :return: True if node is Swarm Worker, False otherwise
+ """
+
+ if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+ return True
+ return False
+
+ def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+ """
+        Checks whether the node status on the Swarm manager is 'down'. If node_id is provided, the manager
+        is queried about the specified node; otherwise the manager is queried about itself. If run on a
+        Swarm worker node, or on a host that is not part of a Swarm, it will fail the playbook.
+
+ :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
+ :param node_id: node ID or name, if None then method will try to get node_id of host module run on
+ :return:
+ True if node is part of swarm but its state is down, False otherwise
+ """
+
+ if repeat_check < 1:
+ repeat_check = 1
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ for retry in range(0, repeat_check):
+ if retry > 0:
+ sleep(5)
+ node_info = self.get_node_inspect(node_id=node_id)
+ if node_info['Status']['State'] == 'down':
+ return True
+ return False
+
+ def get_node_inspect(self, node_id=None, skip_missing=False):
+ """
+        Returns Swarm node information about a single node, as shown by the 'docker node inspect' command.
+
+ :param skip_missing: if True then function will return None instead of failing the task
+ :param node_id: node ID or name, if None then method will try to get node_id of host module run on
+ :return:
+ Single node information structure
+ """
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ if node_id is None:
+ self.fail("Failed to get node information.")
+
+ try:
+ node_info = self.inspect_node(node_id=node_id)
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ if exc.status_code == 404:
+ if skip_missing:
+ return None
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+
+ if 'ManagerStatus' in node_info:
+ if node_info['ManagerStatus'].get('Leader'):
+ # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
+ count_colons = node_info['ManagerStatus']['Addr'].count(":")
+ if count_colons == 1:
+ swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
+ else:
+ swarm_leader_ip = node_info['Status']['Addr']
+ node_info['Status']['Addr'] = swarm_leader_ip
+ return node_info
+
+ def get_all_nodes_inspect(self):
+ """
+        Returns Swarm node information about all registered nodes, as shown by the 'docker node inspect' command.
+
+ :return:
+ Structure with information about all nodes
+ """
+ try:
+ node_info = self.nodes()
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+ return node_info
+
+ def get_all_nodes_list(self, output='short'):
+ """
+ Returns list of nodes registered in Swarm
+
+ :param output: Defines format of returned data
+ :return:
+            If 'output' is 'short', the returned data is a list of the hostnames of the nodes registered
+            in the Swarm; if 'output' is 'long', it is a list of dicts containing the attributes shown in
+            the output of the 'docker node ls' command.
+ """
+ nodes_list = []
+
+ nodes_inspect = self.get_all_nodes_inspect()
+ if nodes_inspect is None:
+ return None
+
+ if output == 'short':
+ for node in nodes_inspect:
+ nodes_list.append(node['Description']['Hostname'])
+ elif output == 'long':
+ for node in nodes_inspect:
+ node_property = {}
+
+ node_property.update({'ID': node['ID']})
+ node_property.update({'Hostname': node['Description']['Hostname']})
+ node_property.update({'Status': node['Status']['State']})
+ node_property.update({'Availability': node['Spec']['Availability']})
+ if 'ManagerStatus' in node:
+ if node['ManagerStatus']['Leader'] is True:
+ node_property.update({'Leader': True})
+ node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
+ node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
+
+ nodes_list.append(node_property)
+ else:
+ return None
+
+ return nodes_list
+
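+    # Illustrative flow for a module running on a manager node (editor's sketch;
+    # 'spec' is a hypothetical argument_spec, and the constructor kwarg is assumed
+    # from the parent class):
+    #
+    #     client = AnsibleDockerSwarmClient(argument_spec=spec)
+    #     client.fail_task_if_not_swarm_manager()
+    #     hostnames = client.get_all_nodes_list(output='short')
+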
+ def get_node_name_by_id(self, nodeid):
+ return self.get_node_inspect(nodeid)['Description']['Hostname']
+
+ def get_unlock_key(self):
+ if self.docker_py_version < LooseVersion('2.7.0'):
+ return None
+ return super(AnsibleDockerSwarmClient, self).get_unlock_key()
+
+ def get_service_inspect(self, service_id, skip_missing=False):
+ """
+        Returns Swarm service information about a single service, as shown by the 'docker service inspect' command.
+
+ :param service_id: service ID or name
+ :param skip_missing: if True then function will return None instead of failing the task
+ :return:
+ Single service information structure
+ """
+ try:
+ service_info = self.inspect_service(service_id)
+ except NotFound as exc:
+ if skip_missing is False:
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ else:
+ return None
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
+ self.fail("Error inspecting swarm service: %s" % exc)
+ except Exception as exc:
+ self.fail("Error inspecting swarm service: %s" % exc)
+
+ json_str = json.dumps(service_info, ensure_ascii=False)
+ service_info = json.loads(json_str)
+ return service_info
diff --git a/test/support/integration/plugins/module_utils/ec2.py b/test/support/integration/plugins/module_utils/ec2.py
new file mode 100644
index 00000000..0d28108d
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/ec2.py
@@ -0,0 +1,758 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import sys
+import traceback
+
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.basic import missing_required_lib, env_fallback
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.cloud import CloudRetry
+from ansible.module_utils.six import string_types, binary_type, text_type
+from ansible.module_utils.common.dict_transformations import (
+ camel_dict_to_snake_dict, snake_dict_to_camel_dict,
+ _camel_to_snake, _snake_to_camel,
+)
+
+BOTO_IMP_ERR = None
+try:
+ import boto
+ import boto.ec2 # boto does weird import stuff
+ HAS_BOTO = True
+except ImportError:
+ BOTO_IMP_ERR = traceback.format_exc()
+ HAS_BOTO = False
+
+BOTO3_IMP_ERR = None
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except Exception:
+ BOTO3_IMP_ERR = traceback.format_exc()
+ HAS_BOTO3 = False
+
+try:
+ # Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also
+ # uses this (and it works as expected). Python 2.6 will trigger the ImportError.
+ from functools import cmp_to_key
+ PY3_COMPARISON = True
+except ImportError:
+ PY3_COMPARISON = False
+
+
+class AnsibleAWSError(Exception):
+ pass
+
+
+def _botocore_exception_maybe():
+ """
+ Allow for boto3 not being installed when using these utils by wrapping
+ botocore.exceptions instead of assigning from it directly.
+ """
+ if HAS_BOTO3:
+ return botocore.exceptions.ClientError
+ return type(None)
+
+
+class AWSRetry(CloudRetry):
+ base_class = _botocore_exception_maybe()
+
+ @staticmethod
+ def status_code_from_exception(error):
+ return error.response['Error']['Code']
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ # This list of failures is based on this API Reference
+ # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
+ #
+ # TooManyRequestsException comes from inside botocore when it
+            # does retries; unfortunately it does not retry for long
+ # enough to allow some services such as API Gateway to
+ # complete configuration. At the moment of writing there is a
+ # botocore/boto3 bug open to fix this.
+ #
+ # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
+ retry_on = [
+ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+ 'InternalFailure', 'InternalError', 'TooManyRequestsException',
+ 'Throttling'
+ ]
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+
+ return response_code in retry_on
+
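+# Typical decorator usage (editor's sketch; the exponential_backoff classmethod
+# is inherited from CloudRetry in module_utils.cloud):
+#
+#     @AWSRetry.exponential_backoff(retries=5, delay=5)
+#     def describe_some_resource(client, **kwargs):
+#         return client.describe_instances(**kwargs)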
+
+def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
+ try:
+ return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
+ except ValueError as e:
+ module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
+ botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
+ module.fail_json(msg=to_native(e))
+ except botocore.exceptions.NoRegionError as e:
+ module.fail_json(msg="The %s module requires a region and none was found in configuration, "
+ "environment variables or module parameters" % module._name)
+
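+# How modules typically wire this together (editor's sketch, hedged):
+#
+#     region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+#     client = boto3_conn(module, conn_type='client', resource='ec2',
+#                         region=region, endpoint=ec2_url, **aws_connect_params)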
+
+def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
+ profile = params.pop('profile_name', None)
+
+ if conn_type not in ['both', 'resource', 'client']:
+ raise ValueError('There is an issue in the calling code. You '
+ 'must specify either both, resource, or client to '
+ 'the conn_type parameter in the boto3_conn function '
+ 'call')
+
+ config = botocore.config.Config(
+ user_agent_extra='Ansible/{0}'.format(__version__),
+ )
+
+ if params.get('config') is not None:
+ config = config.merge(params.pop('config'))
+ if params.get('aws_config') is not None:
+ config = config.merge(params.pop('aws_config'))
+
+ session = boto3.session.Session(
+ profile_name=profile,
+ )
+
+ if conn_type == 'resource':
+ return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ elif conn_type == 'client':
+ return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ else:
+ client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
+ resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ return client, resource
+
+
+boto3_inventory_conn = _boto3_conn
+
+
+def boto_exception(err):
+ """
+ Extracts the error message from a boto exception.
+
+ :param err: Exception from boto
+ :return: Error message
+ """
+ if hasattr(err, 'error_message'):
+ error = err.error_message
+ elif hasattr(err, 'message'):
+ error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
+ else:
+ error = '%s: %s' % (Exception, err)
+
+ return error
+
+
+def aws_common_argument_spec():
+ return dict(
+ debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
+ ec2_url=dict(),
+ aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
+ aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
+ validate_certs=dict(default=True, type='bool'),
+ security_token=dict(aliases=['access_token'], no_log=True),
+ profile=dict(),
+ aws_config=dict(type='dict'),
+ )
+
+
+def ec2_argument_spec():
+ spec = aws_common_argument_spec()
+ spec.update(
+ dict(
+ region=dict(aliases=['aws_region', 'ec2_region']),
+ )
+ )
+ return spec
+
+
+def get_aws_region(module, boto3=False):
+ region = module.params.get('region')
+
+ if region:
+ return region
+
+ if 'AWS_REGION' in os.environ:
+ return os.environ['AWS_REGION']
+ if 'AWS_DEFAULT_REGION' in os.environ:
+ return os.environ['AWS_DEFAULT_REGION']
+ if 'EC2_REGION' in os.environ:
+ return os.environ['EC2_REGION']
+
+ if not boto3:
+ if not HAS_BOTO:
+ module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
+ # boto.config.get returns None if config not found
+ region = boto.config.get('Boto', 'aws_region')
+ if region:
+ return region
+ return boto.config.get('Boto', 'ec2_region')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
+
+    # No additional call is needed here; the connection will default to 'us-east-1' if the below evaluates to None.
+ try:
+ profile_name = module.params.get('profile')
+ return botocore.session.Session(profile=profile_name).get_config_variable('region')
+ except botocore.exceptions.ProfileNotFound as e:
+ return None
+
+
+def get_aws_connection_info(module, boto3=False):
+
+ # Check module args for credentials, then check environment vars
+ # access_key
+
+ ec2_url = module.params.get('ec2_url')
+ access_key = module.params.get('aws_access_key')
+ secret_key = module.params.get('aws_secret_key')
+ security_token = module.params.get('security_token')
+ region = get_aws_region(module, boto3)
+ profile_name = module.params.get('profile')
+ validate_certs = module.params.get('validate_certs')
+ config = module.params.get('aws_config')
+
+ if not ec2_url:
+ if 'AWS_URL' in os.environ:
+ ec2_url = os.environ['AWS_URL']
+ elif 'EC2_URL' in os.environ:
+ ec2_url = os.environ['EC2_URL']
+
+ if not access_key:
+ if os.environ.get('AWS_ACCESS_KEY_ID'):
+ access_key = os.environ['AWS_ACCESS_KEY_ID']
+ elif os.environ.get('AWS_ACCESS_KEY'):
+ access_key = os.environ['AWS_ACCESS_KEY']
+ elif os.environ.get('EC2_ACCESS_KEY'):
+ access_key = os.environ['EC2_ACCESS_KEY']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
+ access_key = boto.config.get('Credentials', 'aws_access_key_id')
+ elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
+ access_key = boto.config.get('default', 'aws_access_key_id')
+ else:
+ # in case access_key came in as empty string
+ access_key = None
+
+ if not secret_key:
+ if os.environ.get('AWS_SECRET_ACCESS_KEY'):
+ secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
+ elif os.environ.get('AWS_SECRET_KEY'):
+ secret_key = os.environ['AWS_SECRET_KEY']
+ elif os.environ.get('EC2_SECRET_KEY'):
+ secret_key = os.environ['EC2_SECRET_KEY']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
+ secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
+ elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
+ secret_key = boto.config.get('default', 'aws_secret_access_key')
+ else:
+ # in case secret_key came in as empty string
+ secret_key = None
+
+ if not security_token:
+ if os.environ.get('AWS_SECURITY_TOKEN'):
+ security_token = os.environ['AWS_SECURITY_TOKEN']
+ elif os.environ.get('AWS_SESSION_TOKEN'):
+ security_token = os.environ['AWS_SESSION_TOKEN']
+ elif os.environ.get('EC2_SECURITY_TOKEN'):
+ security_token = os.environ['EC2_SECURITY_TOKEN']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
+ security_token = boto.config.get('Credentials', 'aws_security_token')
+ elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
+ security_token = boto.config.get('default', 'aws_security_token')
+ else:
+            # in case security_token came in as empty string
+ security_token = None
+
+ if HAS_BOTO3 and boto3:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ aws_session_token=security_token)
+ boto_params['verify'] = validate_certs
+
+ if profile_name:
+ boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
+ boto_params['profile_name'] = profile_name
+
+ else:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ security_token=security_token)
+
+ # only set profile_name if passed as an argument
+ if profile_name:
+ boto_params['profile_name'] = profile_name
+
+ boto_params['validate_certs'] = validate_certs
+
+ if config is not None:
+ if HAS_BOTO3 and boto3:
+ boto_params['aws_config'] = botocore.config.Config(**config)
+ elif HAS_BOTO and not boto3:
+ if 'user_agent' in config:
+ sys.modules["boto.connection"].UserAgent = config['user_agent']
+
+ for param, value in boto_params.items():
+ if isinstance(value, binary_type):
+ boto_params[param] = text_type(value, 'utf-8', 'strict')
+
+ return region, ec2_url, boto_params
+
+
+def get_ec2_creds(module):
+    ''' For compatibility with old modules that don't/can't yet
+        use the ec2_connect method '''
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+ return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
+
+
+def boto_fix_security_token_in_profile(conn, profile_name):
+ ''' monkey patch for boto issue boto/boto#2100 '''
+ profile = 'profile ' + profile_name
+ if boto.config.has_option(profile, 'aws_security_token'):
+ conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
+ return conn
+
+
+def connect_to_aws(aws_module, region, **params):
+ try:
+ conn = aws_module.connect_to_region(region, **params)
+    except boto.provider.ProfileNotFoundError:
+ raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
+ if not conn:
+ if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
+ raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
+ "boto or extend with endpoints_path" % (region, aws_module.__name__))
+ else:
+ raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
+ if params.get('profile_name'):
+ conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
+ return conn
+
+
+def ec2_connect(module):
+
+ """ Return an ec2 connection"""
+
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+
+ # If we have a region specified, connect to its endpoint.
+ if region:
+ try:
+ ec2 = connect_to_aws(boto.ec2, region, **boto_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
+ module.fail_json(msg=str(e))
+ # Otherwise, no region so we fallback to the old connection method
+ elif ec2_url:
+ try:
+ ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="Either region or ec2_url must be specified")
+
+ return ec2
+
+
+def ansible_dict_to_boto3_filter_list(filters_dict):
+
+ """ Convert an Ansible dict of filters to list of dicts that boto3 can use
+ Args:
+ filters_dict (dict): Dict of AWS filters.
+ Basic Usage:
+ >>> filters = {'some-aws-id': 'i-01234567'}
+ >>> ansible_dict_to_boto3_filter_list(filters)
+    [
+        {
+            'Name': 'some-aws-id',
+            'Values': [
+                'i-01234567',
+            ]
+        }
+    ]
+ Returns:
+ List: List of AWS filters and their values
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+ """
+
+ filters_list = []
+ for k, v in filters_dict.items():
+ filter_dict = {'Name': k}
+ if isinstance(v, string_types):
+ filter_dict['Values'] = [v]
+ else:
+ filter_dict['Values'] = v
+
+ filters_list.append(filter_dict)
+
+ return filters_list
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+ """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+ Args:
+ tags_list (list): List of dicts representing AWS tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+ >>> boto3_tag_list_to_ansible_dict(tags_list)
+    {
+        'MyTagKey': 'MyTagValue'
+    }
+ Returns:
+ Dict: Dict of key:value pairs representing AWS tags
+ {
+ 'MyTagKey': 'MyTagValue',
+ }
+ """
+
+ if tag_name_key_name and tag_value_key_name:
+ tag_candidates = {tag_name_key_name: tag_value_key_name}
+ else:
+ tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+ if not tags_list:
+ return {}
+ for k, v in tag_candidates.items():
+ if k in tags_list[0] and v in tags_list[0]:
+ return dict((tag[k], tag[v]) for tag in tags_list)
+ raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
+
+
+def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
+
+ """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
+ Args:
+ tags_dict (dict): Dict representing AWS resource tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_dict = {'MyTagKey': 'MyTagValue'}
+ >>> ansible_dict_to_boto3_tag_list(tags_dict)
+    [
+        {
+            'Key': 'MyTagKey',
+            'Value': 'MyTagValue'
+        }
+    ]
+ Returns:
+ List: List of dicts containing tag keys and values
+ [
+ {
+ 'Key': 'MyTagKey',
+ 'Value': 'MyTagValue'
+ }
+ ]
+ """
+
+ tags_list = []
+ for k, v in tags_dict.items():
+ tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
+
+ return tags_list
+
+
+def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
+
+ """ Return list of security group IDs from security group names. Note that security group names are not unique
+ across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
+    will probably lead to a boto exception if you attempt to assign both IDs to a resource, so ensure you wrap the
+    call in a try block.
+ """
+
+ def get_sg_name(sg, boto3):
+
+ if boto3:
+ return sg['GroupName']
+ else:
+ return sg.name
+
+ def get_sg_id(sg, boto3):
+
+ if boto3:
+ return sg['GroupId']
+ else:
+ return sg.id
+
+ sec_group_id_list = []
+
+ if isinstance(sec_group_list, string_types):
+ sec_group_list = [sec_group_list]
+
+ # Get all security groups
+ if boto3:
+ if vpc_id:
+ filters = [
+ {
+ 'Name': 'vpc-id',
+ 'Values': [
+ vpc_id,
+ ]
+ }
+ ]
+ all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
+ else:
+ all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
+ else:
+ if vpc_id:
+ filters = {'vpc-id': vpc_id}
+ all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
+ else:
+ all_sec_groups = ec2_connection.get_all_security_groups()
+
+ unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
+ sec_group_name_list = list(set(sec_group_list) - set(unmatched))
+
+ if len(unmatched) > 0:
+ # If we have unmatched names that look like an ID, assume they are
+ sec_group_id_list = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
+ still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
+ if len(still_unmatched) > 0:
+ raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
+
+ sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
+
+ return sec_group_id_list
+
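+# Hedged usage sketch (the group names, VPC ID, and surrounding module objects
+# below are hypothetical):
+#
+#     try:
+#         sg_ids = get_ec2_security_group_ids_from_names(
+#             ['default', 'sg-0123456789abcdef0'], ec2_connection, vpc_id='vpc-12345678')
+#     except ValueError as e:
+#         module.fail_json(msg=to_native(e))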
+
+def _hashable_policy(policy, policy_list):
+ """
+ Takes a policy and returns a list, the contents of which are all hashable and sorted.
+ Example input policy:
+ {'Version': '2012-10-17',
+ 'Statement': [{'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }]}
+ Returned value:
+ [('Statement', ((('Action', (u's3:PutObjectAcl',)),
+ ('Effect', (u'Allow',)),
+ ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
+ ('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
+ ('Version', (u'2012-10-17',)))]
+
+ """
+ # Amazon will automatically convert bool and int to strings for us
+ if isinstance(policy, bool):
+ return tuple([str(policy).lower()])
+ elif isinstance(policy, int):
+ return tuple([str(policy)])
+
+ if isinstance(policy, list):
+ for each in policy:
+ tupleified = _hashable_policy(each, [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append(tupleified)
+ elif isinstance(policy, string_types) or isinstance(policy, binary_type):
+ policy = to_text(policy)
+ # convert root account ARNs to just account IDs
+ if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
+ policy = policy.split(':')[4]
+ return [policy]
+ elif isinstance(policy, dict):
+ sorted_keys = list(policy.keys())
+ sorted_keys.sort()
+ for key in sorted_keys:
+ tupleified = _hashable_policy(policy[key], [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append((key, tupleified))
+
+ # ensure we aren't returning deeply nested structures of length 1
+ if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
+ policy_list = policy_list[0]
+ if isinstance(policy_list, list):
+ if PY3_COMPARISON:
+ policy_list.sort(key=cmp_to_key(py3cmp))
+ else:
+ policy_list.sort()
+ return policy_list
+
+
+def py3cmp(a, b):
+ """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
+ try:
+ if a > b:
+ return 1
+ elif a < b:
+ return -1
+ else:
+ return 0
+ except TypeError as e:
+ # check to see if they're tuple-string
+ # always say strings are less than tuples (to maintain compatibility with python2)
+ str_ind = to_text(e).find('str')
+ tup_ind = to_text(e).find('tuple')
+ if -1 not in (str_ind, tup_ind):
+ if str_ind < tup_ind:
+ return -1
+ elif tup_ind < str_ind:
+ return 1
+ raise
+
+
+def compare_policies(current_policy, new_policy):
+ """ Compares the existing policy and the updated policy
+ Returns True if there is a difference between policies.
+ """
+ return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
+
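+# compare_policies returns True when the two policies differ (editor's sketch):
+#
+#     unchanged = {'Version': '2012-10-17', 'Statement': []}
+#     reordered = {'Statement': [], 'Version': '2012-10-17'}
+#     compare_policies(unchanged, reordered)  # -> False (key order is irrelevant)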
+
+def sort_json_policy_dict(policy_dict):
+
+ """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
+ different orders will return true
+ Args:
+ policy_dict (dict): Dict representing IAM JSON policy.
+ Basic Usage:
+    >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
+ >>> sort_json_policy_dict(my_iam_policy)
+ Returns:
+ Dict: Will return a copy of the policy as a Dict but any List will be sorted
+ {
+ 'Principle': {
+ 'AWS': [ '7', '14', '31', '101' ]
+ }
+ }
+ """
+
+ def value_is_list(my_list):
+
+ checked_list = []
+ for item in my_list:
+ if isinstance(item, dict):
+ checked_list.append(sort_json_policy_dict(item))
+ elif isinstance(item, list):
+ checked_list.append(value_is_list(item))
+ else:
+ checked_list.append(item)
+
+ # Sort list. If it's a list of dictionaries, sort by tuple of key-value
+ # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
+ checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
+ return checked_list
+
+ ordered_policy_dict = {}
+ for key, value in policy_dict.items():
+ if isinstance(value, dict):
+ ordered_policy_dict[key] = sort_json_policy_dict(value)
+ elif isinstance(value, list):
+ ordered_policy_dict[key] = value_is_list(value)
+ else:
+ ordered_policy_dict[key] = value
+
+ return ordered_policy_dict
+
+
+def map_complex_type(complex_type, type_map):
+ """
+    Allows elements within a dictionary to be cast to a specific type.
+ Example of usage:
+
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+ 'maximum_percent': 'int',
+ 'minimum_healthy_percent': 'int'
+ }
+
+ deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+
+    This ensures the values of all mapped keys within the root element are cast to valid integers.
+ """
+
+ if complex_type is None:
+ return
+ new_type = type(complex_type)()
+ if isinstance(complex_type, dict):
+ for key in complex_type:
+ if key in type_map:
+ if isinstance(type_map[key], list):
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key][0])
+ else:
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key])
+ else:
+ return complex_type
+ elif isinstance(complex_type, list):
+ for i in range(len(complex_type)):
+ new_type.append(map_complex_type(
+ complex_type[i],
+ type_map))
+ elif type_map:
+ return globals()['__builtins__'][type_map](complex_type)
+ return new_type
+
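+# For instance (editor's illustration):
+#
+#     map_complex_type({'maximum_percent': '200', 'minimum_healthy_percent': '50'},
+#                      {'maximum_percent': 'int', 'minimum_healthy_percent': 'int'})
+#     # -> {'maximum_percent': 200, 'minimum_healthy_percent': 50}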
+
+def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
+ """
+    Compare two dicts of AWS tags. Dicts are expected to have been created using the 'boto3_tag_list_to_ansible_dict' helper function.
+    Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ,
+    these may not be usable out of the box.
+
+ :param current_tags_dict:
+ :param new_tags_dict:
+ :param purge_tags:
+ :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
+ :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
+ """
+
+ tag_key_value_pairs_to_set = {}
+ tag_keys_to_unset = []
+
+ for key in current_tags_dict.keys():
+ if key not in new_tags_dict and purge_tags:
+ tag_keys_to_unset.append(key)
+
+ for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
+ if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
+ tag_key_value_pairs_to_set[key] = new_tags_dict[key]
+
+ return tag_key_value_pairs_to_set, tag_keys_to_unset
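+
+# Worked example (editor's sketch):
+#
+#     current = {'Name': 'web', 'env': 'dev', 'owner': 'ops'}
+#     desired = {'Name': 'web', 'env': 'prod'}
+#     compare_aws_tags(current, desired)                    # -> ({'env': 'prod'}, ['owner'])
+#     compare_aws_tags(current, desired, purge_tags=False)  # -> ({'env': 'prod'}, [])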
diff --git a/test/support/integration/plugins/module_utils/ecs/__init__.py b/test/support/integration/plugins/module_utils/ecs/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/ecs/__init__.py
diff --git a/test/support/integration/plugins/module_utils/ecs/api.py b/test/support/integration/plugins/module_utils/ecs/api.py
new file mode 100644
index 00000000..d89b0333
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/ecs/api.py
@@ -0,0 +1,364 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is licensed under the
+# Modified BSD License. Modules you write using this snippet, which is embedded
+# dynamically by Ansible, still belong to the author of the module, and may assign
+# their own license to the complete work.
+#
+# Copyright (c), Entrust Datacard Corporation, 2019
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+import os
+import re
+import time
+import traceback
+
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.urls import Request
+
+YAML_IMP_ERR = None
+try:
+ import yaml
+except ImportError:
+ YAML_FOUND = False
+ YAML_IMP_ERR = traceback.format_exc()
+else:
+ YAML_FOUND = True
+
+valid_file_format = re.compile(r".*(\.)(yml|yaml|json)$")
+
+
+def ecs_client_argument_spec():
+ return dict(
+ entrust_api_user=dict(type='str', required=True),
+ entrust_api_key=dict(type='str', required=True, no_log=True),
+ entrust_api_client_cert_path=dict(type='path', required=True),
+ entrust_api_client_cert_key_path=dict(type='path', required=True, no_log=True),
+ entrust_api_specification_path=dict(type='path', default='https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml'),
+ )
+
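+# Modules building on this spec typically merge it into their own argument spec
+# (editor's sketch; the 'request_type' option is a hypothetical extra, and
+# AnsibleModule comes from ansible.module_utils.basic):
+#
+#     argument_spec = ecs_client_argument_spec()
+#     argument_spec.update(dict(request_type=dict(type='str', default='new')))
+#     module = AnsibleModule(argument_spec=argument_spec)
+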
+
+class SessionConfigurationException(Exception):
+ """ Raised if we cannot configure a session with the API """
+
+ pass
+
+
+class RestOperationException(Exception):
+ """ Encapsulate a REST API error """
+
+ def __init__(self, error):
+ self.status = to_native(error.get("status", None))
+ self.errors = [to_native(err.get("message")) for err in error.get("errors", {})]
+ self.message = to_native(" ".join(self.errors))
+
+
+def generate_docstring(operation_spec):
+ """Generate a docstring for an operation defined in operation_spec (swagger)"""
+ # Description of the operation
+ docs = operation_spec.get("description", "No Description")
+ docs += "\n\n"
+
+ # Parameters of the operation
+ parameters = operation_spec.get("parameters", [])
+ if len(parameters) != 0:
+ docs += "\tArguments:\n\n"
+ for parameter in parameters:
+ docs += "{0} ({1}:{2}): {3}\n".format(
+ parameter.get("name"),
+ parameter.get("type", "No Type"),
+ "Required" if parameter.get("required", False) else "Not Required",
+ parameter.get("description"),
+ )
+
+ return docs
+
+
+def bind(instance, method, operation_spec):
+ def binding_scope_fn(*args, **kwargs):
+ return method(instance, *args, **kwargs)
+
+ # Make sure we don't confuse users; add the proper name and documentation to the function.
+ # Users can use !help(<function>) to get help on the function from interactive python or pdb
+ operation_name = operation_spec.get("operationId").split("Using")[0]
+ binding_scope_fn.__name__ = str(operation_name)
+ binding_scope_fn.__doc__ = generate_docstring(operation_spec)
+
+ return binding_scope_fn
+
+
+class RestOperation(object):
+ def __init__(self, session, uri, method, parameters=None):
+ self.session = session
+ self.method = method
+ if parameters is None:
+ self.parameters = {}
+ else:
+ self.parameters = parameters
+ self.url = "{scheme}://{host}{base_path}{uri}".format(scheme="https", host=session._spec.get("host"), base_path=session._spec.get("basePath"), uri=uri)
+
+ def restmethod(self, *args, **kwargs):
+ """Do the hard work of making the request here"""
+
+ # gather named path parameters and do substitution on the URL
+ if self.parameters:
+ path_parameters = {}
+ body_parameters = {}
+ query_parameters = {}
+ for x in self.parameters:
+ expected_location = x.get("in")
+ key_name = x.get("name", None)
+ key_value = kwargs.get(key_name, None)
+ if expected_location == "path" and key_name and key_value:
+ path_parameters.update({key_name: key_value})
+ elif expected_location == "body" and key_name and key_value:
+ body_parameters.update({key_name: key_value})
+ elif expected_location == "query" and key_name and key_value:
+ query_parameters.update({key_name: key_value})
+
+ if len(body_parameters.keys()) >= 1:
+ body_parameters = body_parameters.get(list(body_parameters.keys())[0])
+ else:
+ body_parameters = None
+ else:
+ path_parameters = {}
+ query_parameters = {}
+ body_parameters = None
+
+ # This will fail if we have not set path parameters with a KeyError
+ url = self.url.format(**path_parameters)
+ if query_parameters:
+ # modify the URL to add path parameters
+ url = url + "?" + urlencode(query_parameters)
+
+ try:
+ if body_parameters:
+ body_parameters_json = json.dumps(body_parameters)
+ response = self.session.request.open(method=self.method, url=url, data=body_parameters_json)
+ else:
+ response = self.session.request.open(method=self.method, url=url)
+ request_error = False
+ except HTTPError as e:
+ # An HTTPError has the same methods available as a valid response from request.open
+ response = e
+ request_error = True
+
+ # Return the result if JSON and success ({} for empty responses)
+ # Raise an exception if there was a failure.
+ try:
+ result_code = response.getcode()
+ result = json.loads(response.read())
+ except ValueError:
+ result = {}
+
+ if result or result == {}:
+ if result_code and result_code < 400:
+ return result
+ else:
+ raise RestOperationException(result)
+
+ # Raise a generic RestOperationException if this fails
+ raise RestOperationException({"status": result_code, "errors": [{"message": "REST Operation Failed"}]})
+
+
+class Resource(object):
+ """ Implement basic CRUD operations against a path. """
+
+ def __init__(self, session):
+ self.session = session
+ self.parameters = {}
+
+ for url in session._spec.get("paths").keys():
+ methods = session._spec.get("paths").get(url)
+ for method in methods.keys():
+ operation_spec = methods.get(method)
+ operation_name = operation_spec.get("operationId", None)
+ parameters = operation_spec.get("parameters")
+
+ if not operation_name:
+ if method.lower() == "post":
+ operation_name = "Create"
+ elif method.lower() == "get":
+ operation_name = "Get"
+ elif method.lower() == "put":
+ operation_name = "Update"
+ elif method.lower() == "delete":
+ operation_name = "Delete"
+ elif method.lower() == "patch":
+ operation_name = "Patch"
+ else:
+ raise SessionConfigurationException(to_native("Invalid REST method type {0}".format(method)))
+
+ # Get the non-parameter parts of the URL and append to the operation name
+ # e.g /application/version -> GetApplicationVersion
+ # e.g. /application/{id} -> GetApplication
+ # This may lead to duplicates, which we must prevent.
+ operation_name += re.sub(r"{(.*)}", "", url).replace("/", " ").title().replace(" ", "")
+ operation_spec["operationId"] = operation_name
+
+ op = RestOperation(session, url, method, parameters)
+ setattr(self, operation_name, bind(self, op.restmethod, operation_spec))
+
+
+# Session to encapsulate the connection parameters of the module_utils Request object, the API spec, etc.
+class ECSSession(object):
+ def __init__(self, name, **kwargs):
+ """
+ Initialize our session
+ """
+
+ self._set_config(name, **kwargs)
+
+ def client(self):
+ resource = Resource(self)
+ return resource
+
+ def _set_config(self, name, **kwargs):
+ headers = {
+ "Content-Type": "application/json",
+ "Connection": "keep-alive",
+ }
+ self.request = Request(headers=headers, timeout=60)
+
+ configurators = [self._read_config_vars]
+ for configurator in configurators:
+ self._config = configurator(name, **kwargs)
+ if self._config:
+ break
+ if self._config is None:
+ raise SessionConfigurationException(to_native("No Configuration Found."))
+
+ # set up auth if passed
+ entrust_api_user = self.get_config("entrust_api_user")
+ entrust_api_key = self.get_config("entrust_api_key")
+ if entrust_api_user and entrust_api_key:
+ self.request.url_username = entrust_api_user
+ self.request.url_password = entrust_api_key
+ else:
+ raise SessionConfigurationException(to_native("User and key must be provided."))
+
+ # set up client certificate if passed (support all-in one or cert + key)
+ entrust_api_cert = self.get_config("entrust_api_cert")
+ entrust_api_cert_key = self.get_config("entrust_api_cert_key")
+ if entrust_api_cert:
+ self.request.client_cert = entrust_api_cert
+ if entrust_api_cert_key:
+ self.request.client_key = entrust_api_cert_key
+ else:
+ raise SessionConfigurationException(to_native("Client certificate for authentication to the API must be provided."))
+
+ # set up the spec
+ entrust_api_specification_path = self.get_config("entrust_api_specification_path")
+
+ if not entrust_api_specification_path.startswith("http") and not os.path.isfile(entrust_api_specification_path):
+ raise SessionConfigurationException(to_native("OpenAPI specification was not found at location {0}.".format(entrust_api_specification_path)))
+ if not valid_file_format.match(entrust_api_specification_path):
+ raise SessionConfigurationException(to_native("OpenAPI specification filename must end in .json, .yml or .yaml"))
+
+ self.verify = True
+
+ if entrust_api_specification_path.startswith("http"):
+ try:
+ http_response = Request().open(method="GET", url=entrust_api_specification_path)
+ http_response_contents = http_response.read()
+ if entrust_api_specification_path.endswith(".json"):
+ # json.loads (not json.load): the response body has already been read into a string
+ self._spec = json.loads(http_response_contents)
+ elif entrust_api_specification_path.endswith(".yml") or entrust_api_specification_path.endswith(".yaml"):
+ self._spec = yaml.safe_load(http_response_contents)
+ except HTTPError as e:
+ raise SessionConfigurationException(to_native("Error downloading specification from address '{0}', received error code '{1}'".format(
+ entrust_api_specification_path, e.getcode())))
+ else:
+ with open(entrust_api_specification_path) as f:
+ if ".json" in entrust_api_specification_path:
+ self._spec = json.load(f)
+ elif ".yml" in entrust_api_specification_path or ".yaml" in entrust_api_specification_path:
+ self._spec = yaml.safe_load(f)
+
+ def get_config(self, item):
+ return self._config.get(item, None)
+
+ def _read_config_vars(self, name, **kwargs):
+ """ Read configuration from variables passed to the module. """
+ config = {}
+
+ entrust_api_specification_path = kwargs.get("entrust_api_specification_path")
+ if not entrust_api_specification_path or (not entrust_api_specification_path.startswith("http") and not os.path.isfile(entrust_api_specification_path)):
+ raise SessionConfigurationException(
+ to_native(
+ "Parameter provided for entrust_api_specification_path of value '{0}' was not a valid file path or HTTPS address.".format(
+ entrust_api_specification_path
+ )
+ )
+ )
+
+ for required_file in ["entrust_api_cert", "entrust_api_cert_key"]:
+ file_path = kwargs.get(required_file)
+ if not file_path or not os.path.isfile(file_path):
+ raise SessionConfigurationException(
+ to_native("Parameter provided for {0} of value '{1}' was not a valid file path.".format(required_file, file_path))
+ )
+
+ for required_var in ["entrust_api_user", "entrust_api_key"]:
+ if not kwargs.get(required_var):
+ raise SessionConfigurationException(to_native("Parameter provided for {0} was missing.".format(required_var)))
+
+ config["entrust_api_cert"] = kwargs.get("entrust_api_cert")
+ config["entrust_api_cert_key"] = kwargs.get("entrust_api_cert_key")
+ config["entrust_api_specification_path"] = kwargs.get("entrust_api_specification_path")
+ config["entrust_api_user"] = kwargs.get("entrust_api_user")
+ config["entrust_api_key"] = kwargs.get("entrust_api_key")
+
+ return config
+
+
+def ECSClient(entrust_api_user=None, entrust_api_key=None, entrust_api_cert=None, entrust_api_cert_key=None, entrust_api_specification_path=None):
+ """Create an ECS client"""
+
+ if not YAML_FOUND:
+ raise SessionConfigurationException(missing_required_lib("PyYAML"), exception=YAML_IMP_ERR)
+
+ if entrust_api_specification_path is None:
+ entrust_api_specification_path = "https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml"
+
+ # Not functionally necessary with current uses of this module_util, but better to be explicit for future use cases
+ entrust_api_user = to_text(entrust_api_user)
+ entrust_api_key = to_text(entrust_api_key)
+ entrust_api_cert_key = to_text(entrust_api_cert_key)
+ entrust_api_specification_path = to_text(entrust_api_specification_path)
+
+ return ECSSession(
+ "ecs",
+ entrust_api_user=entrust_api_user,
+ entrust_api_key=entrust_api_key,
+ entrust_api_cert=entrust_api_cert,
+ entrust_api_cert_key=entrust_api_cert_key,
+ entrust_api_specification_path=entrust_api_specification_path,
+ ).client()
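+
+
+# Illustrative usage sketch (not part of the original module_util). The
+# operation name below is hypothetical; real names are generated from the
+# loaded OpenAPI specification's operationIds and paths:
+#
+#   client = ECSClient(
+#       entrust_api_user='user',
+#       entrust_api_key='key',
+#       entrust_api_cert='/path/to/cert.pem',
+#       entrust_api_cert_key='/path/to/key.pem',
+#   )
+#   result = client.GetVersion()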
diff --git a/test/support/integration/plugins/module_utils/mysql.py b/test/support/integration/plugins/module_utils/mysql.py
new file mode 100644
index 00000000..46198f36
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/mysql.py
@@ -0,0 +1,106 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Jonathan Mainguy <jon@soh.re>, 2015
+# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+try:
+ import pymysql as mysql_driver
+ _mysql_cursor_param = 'cursor'
+except ImportError:
+ try:
+ import MySQLdb as mysql_driver
+ import MySQLdb.cursors
+ _mysql_cursor_param = 'cursorclass'
+ except ImportError:
+ mysql_driver = None
+
+mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.'
+
+
+def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None,
+ connect_timeout=30, autocommit=False):
+ config = {}
+
+ if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:
+ config['ssl'] = {}
+
+ if module.params['login_unix_socket']:
+ config['unix_socket'] = module.params['login_unix_socket']
+ else:
+ config['host'] = module.params['login_host']
+ config['port'] = module.params['login_port']
+
+ if os.path.exists(config_file):
+ config['read_default_file'] = config_file
+
+ # If login_user or login_password are given, they should override the
+ # config file
+ if login_user is not None:
+ config['user'] = login_user
+ if login_password is not None:
+ config['passwd'] = login_password
+ if ssl_cert is not None:
+ config['ssl']['cert'] = ssl_cert
+ if ssl_key is not None:
+ config['ssl']['key'] = ssl_key
+ if ssl_ca is not None:
+ config['ssl']['ca'] = ssl_ca
+ if db is not None:
+ config['db'] = db
+ if connect_timeout is not None:
+ config['connect_timeout'] = connect_timeout
+
+ if _mysql_cursor_param == 'cursor':
+ # In case of PyMySQL driver:
+ db_connection = mysql_driver.connect(autocommit=autocommit, **config)
+ else:
+ # In case of MySQLdb driver
+ db_connection = mysql_driver.connect(**config)
+ if autocommit:
+ db_connection.autocommit(True)
+
+ if cursor_class == 'DictCursor':
+ return db_connection.cursor(**{_mysql_cursor_param: mysql_driver.cursors.DictCursor}), db_connection
+ else:
+ return db_connection.cursor(), db_connection
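+
+# Illustrative usage sketch (module is an AnsibleModule whose argument_spec was
+# built with mysql_common_argument_spec() below; values come from module params):
+#   cursor, db_conn = mysql_connect(module,
+#                                   login_user=module.params['login_user'],
+#                                   login_password=module.params['login_password'],
+#                                   config_file=module.params['config_file'],
+#                                   cursor_class='DictCursor')
+#   cursor.execute('SELECT VERSION()')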
+
+
+def mysql_common_argument_spec():
+ return dict(
+ login_user=dict(type='str', default=None),
+ login_password=dict(type='str', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=3306),
+ login_unix_socket=dict(type='str'),
+ config_file=dict(type='path', default='~/.my.cnf'),
+ connect_timeout=dict(type='int', default=30),
+ client_cert=dict(type='path', aliases=['ssl_cert']),
+ client_key=dict(type='path', aliases=['ssl_key']),
+ ca_cert=dict(type='path', aliases=['ssl_ca']),
+ )
diff --git a/test/support/integration/plugins/module_utils/net_tools/__init__.py b/test/support/integration/plugins/module_utils/net_tools/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/net_tools/__init__.py
diff --git a/test/support/integration/plugins/module_utils/network/__init__.py b/test/support/integration/plugins/module_utils/network/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/network/__init__.py
diff --git a/test/support/integration/plugins/module_utils/network/common/__init__.py b/test/support/integration/plugins/module_utils/network/common/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/network/common/__init__.py
diff --git a/test/support/integration/plugins/module_utils/network/common/utils.py b/test/support/integration/plugins/module_utils/network/common/utils.py
new file mode 100644
index 00000000..80317387
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/network/common/utils.py
@@ -0,0 +1,643 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2016 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Networking tools for network modules only
+
+import re
+import ast
+import operator
+import socket
+import json
+
+from itertools import chain
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils import basic
+from ansible.module_utils.parsing.convert_bool import boolean
+
+# Backwards compatibility for 3rd party modules
+# TODO(pabelanger): With the move to ansible.netcommon, we should clean this code
+# up and have modules import it directly themselves.
+from ansible.module_utils.common.network import ( # noqa: F401
+ to_bits, is_netmask, is_masklen, to_netmask, to_masklen, to_subnet, to_ipv6_network, VALID_MASKS
+)
+
+try:
+ from jinja2 import Environment, StrictUndefined
+ from jinja2.exceptions import UndefinedError
+ HAS_JINJA2 = True
+except ImportError:
+ HAS_JINJA2 = False
+
+
+OPERATORS = frozenset(['ge', 'gt', 'eq', 'neq', 'lt', 'le'])
+ALIASES = frozenset([('min', 'ge'), ('max', 'le'), ('exactly', 'eq'), ('neq', 'ne')])
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple, set)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+def to_lines(stdout):
+ for item in stdout:
+ if isinstance(item, string_types):
+ item = to_text(item).split('\n')
+ yield item
+
+
+def transform_commands(module):
+ transform = ComplexList(dict(
+ command=dict(key=True),
+ output=dict(),
+ prompt=dict(type='list'),
+ answer=dict(type='list'),
+ newline=dict(type='bool', default=True),
+ sendonly=dict(type='bool', default=False),
+ check_all=dict(type='bool', default=False),
+ ), module)
+
+ return transform(module.params['commands'])
+
+
+def sort_list(val):
+ if isinstance(val, list):
+ return sorted(val)
+ return val
+
+
+class Entity(object):
+ """Transforms a dict to with an argument spec
+
+ This class will take a dict and apply an Ansible argument spec to the
+ values. The resulting dict will contain all of the keys in the param
+ with appropriate values set.
+
+ Example::
+
+ argument_spec = dict(
+ command=dict(key=True),
+ display=dict(default='text', choices=['text', 'json']),
+ validate=dict(type='bool')
+ )
+ transform = Entity(module, argument_spec)
+ value = dict(command='foo')
+ result = transform(value)
+ print(result)
+ {'command': 'foo', 'display': 'text', 'validate': None}
+
+ Supported argument spec:
+ * key - specifies how to map a single value to a dict
+ * read_from - read and apply the argument_spec from the module
+ * required - a value is required
+ * type - type of value (uses AnsibleModule type checker)
+ * fallback - implements fallback function
+ * choices - set of valid options
+ * default - default value
+ """
+
+ def __init__(self, module, attrs=None, args=None, keys=None, from_argspec=False):
+ args = [] if args is None else args
+
+ self._attributes = attrs or {}
+ self._module = module
+
+ for arg in args:
+ self._attributes[arg] = dict()
+ if from_argspec:
+ self._attributes[arg]['read_from'] = arg
+ if keys and arg in keys:
+ self._attributes[arg]['key'] = True
+
+ self.attr_names = frozenset(self._attributes.keys())
+
+ _has_key = False
+
+ for name, attr in iteritems(self._attributes):
+ if attr.get('read_from'):
+ if attr['read_from'] not in self._module.argument_spec:
+ module.fail_json(msg='argument %s does not exist' % attr['read_from'])
+ spec = self._module.argument_spec.get(attr['read_from'])
+ for key, value in iteritems(spec):
+ if key not in attr:
+ attr[key] = value
+
+ if attr.get('key'):
+ if _has_key:
+ module.fail_json(msg='only one key value can be specified')
+ _has_key = True
+ attr['required'] = True
+
+ def serialize(self):
+ return self._attributes
+
+ def to_dict(self, value):
+ obj = {}
+ for name, attr in iteritems(self._attributes):
+ if attr.get('key'):
+ obj[name] = value
+ else:
+ obj[name] = attr.get('default')
+ return obj
+
+ def __call__(self, value, strict=True):
+ if not isinstance(value, dict):
+ value = self.to_dict(value)
+
+ if strict:
+ unknown = set(value).difference(self.attr_names)
+ if unknown:
+ self._module.fail_json(msg='invalid keys: %s' % ','.join(unknown))
+
+ for name, attr in iteritems(self._attributes):
+ if value.get(name) is None:
+ value[name] = attr.get('default')
+
+ if attr.get('fallback') and not value.get(name):
+ fallback = attr.get('fallback', (None,))
+ fallback_strategy = fallback[0]
+ fallback_args = []
+ fallback_kwargs = {}
+ if fallback_strategy is not None:
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ fallback_kwargs = item
+ else:
+ fallback_args = item
+ try:
+ value[name] = fallback_strategy(*fallback_args, **fallback_kwargs)
+ except basic.AnsibleFallbackNotFound:
+ continue
+
+ if attr.get('required') and value.get(name) is None:
+ self._module.fail_json(msg='missing required attribute %s' % name)
+
+ if 'choices' in attr:
+ if value[name] not in attr['choices']:
+ self._module.fail_json(msg='%s must be one of %s, got %s' % (name, ', '.join(attr['choices']), value[name]))
+
+ if value[name] is not None:
+ value_type = attr.get('type', 'str')
+ type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
+ type_checker(value[name])
+ elif value.get(name):
+ value[name] = self._module.params[name]
+
+ return value
+
+
+class EntityCollection(Entity):
+ """Extends ```Entity``` to handle a list of dicts """
+
+ def __call__(self, iterable, strict=True):
+ if iterable is None:
+ iterable = [super(EntityCollection, self).__call__(self._module.params, strict)]
+
+ if not isinstance(iterable, (list, tuple)):
+ self._module.fail_json(msg='value must be an iterable')
+
+ return [(super(EntityCollection, self).__call__(i, strict)) for i in iterable]
+
+
+# these two are for backwards compatibility and can be removed once all of the
+# modules that use them are updated
+class ComplexDict(Entity):
+ def __init__(self, attrs, module, *args, **kwargs):
+ super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
+
+
+class ComplexList(EntityCollection):
+ def __init__(self, attrs, module, *args, **kwargs):
+ super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
+
+
+def dict_diff(base, comparable):
+ """ Generate a dict object of differences
+
+ This function will compare two dict objects and return the difference
+ between them as a dict object. For scalar values, the key will reflect
+ the updated value. If the key does not exist in `comparable`, then no
+ key will be returned. For lists, the value in comparable will wholly replace
+ the value in base for the key. For dicts, the returned value will only
+ return keys that are different.
+
+ :param base: dict object to base the diff on
+ :param comparable: dict object to compare against base
+
+ :returns: new dict object with differences
+ """
+ if not isinstance(base, dict):
+ raise AssertionError("`base` must be of type <dict>")
+ if not isinstance(comparable, dict):
+ if comparable is None:
+ comparable = dict()
+ else:
+ raise AssertionError("`comparable` must be of type <dict>")
+
+ updates = dict()
+
+ for key, value in iteritems(base):
+ if isinstance(value, dict):
+ item = comparable.get(key)
+ if item is not None:
+ sub_diff = dict_diff(value, comparable[key])
+ if sub_diff:
+ updates[key] = sub_diff
+ else:
+ comparable_value = comparable.get(key)
+ if comparable_value is not None:
+ if sort_list(base[key]) != sort_list(comparable_value):
+ updates[key] = comparable_value
+
+ for key in set(comparable.keys()).difference(base.keys()):
+ updates[key] = comparable.get(key)
+
+ return updates
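+
+# Illustrative example:
+#   dict_diff({'a': 1, 'b': [1, 2]}, {'a': 1, 'b': [1, 2, 3], 'c': 5})
+#   -> {'b': [1, 2, 3], 'c': 5}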
+
+
+def dict_merge(base, other):
+ """ Return a new dict object that combines base and other
+
+ This will create a new dict object that is a combination of the key/value
+ pairs from base and other. When both keys exist, the value will be
+ selected from other. If the value is a list object, the two lists will
+ be combined and duplicate entries removed.
+
+ :param base: dict object to serve as base
+ :param other: dict object to combine with base
+
+ :returns: new combined dict object
+ """
+ if not isinstance(base, dict):
+ raise AssertionError("`base` must be of type <dict>")
+ if not isinstance(other, dict):
+ raise AssertionError("`other` must be of type <dict>")
+
+ combined = dict()
+
+ for key, value in iteritems(base):
+ if isinstance(value, dict):
+ if key in other:
+ item = other.get(key)
+ if item is not None:
+ if isinstance(other[key], Mapping):
+ combined[key] = dict_merge(value, other[key])
+ else:
+ combined[key] = other[key]
+ else:
+ combined[key] = item
+ else:
+ combined[key] = value
+ elif isinstance(value, list):
+ if key in other:
+ item = other.get(key)
+ if item is not None:
+ try:
+ combined[key] = list(set(chain(value, item)))
+ except TypeError:
+ value.extend([i for i in item if i not in value])
+ combined[key] = value
+ else:
+ combined[key] = item
+ else:
+ combined[key] = value
+ else:
+ if key in other:
+ other_value = other.get(key)
+ if other_value is not None:
+ if sort_list(base[key]) != sort_list(other_value):
+ combined[key] = other_value
+ else:
+ combined[key] = value
+ else:
+ combined[key] = other_value
+ else:
+ combined[key] = value
+
+ for key in set(other.keys()).difference(base.keys()):
+ combined[key] = other.get(key)
+
+ return combined
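+
+# Illustrative example (merged list order is not guaranteed, since a set is used):
+#   dict_merge({'a': 1, 'b': [1, 2]}, {'b': [2, 3], 'c': 4})
+#   -> {'a': 1, 'b': [1, 2, 3], 'c': 4}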
+
+
+def param_list_to_dict(param_list, unique_key="name", remove_key=True):
+ """Rotates a list of dictionaries to be a dictionary of dictionaries.
+
+ :param param_list: The aforementioned list of dictionaries
+ :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value
+ behind this key will be the key each dictionary can be found at in the new root dictionary
+ :param remove_key: If True, remove unique_key from the individual dictionaries before returning.
+ """
+ param_dict = {}
+ for params in param_list:
+ params = params.copy()
+ if remove_key:
+ name = params.pop(unique_key)
+ else:
+ name = params.get(unique_key)
+ param_dict[name] = params
+
+ return param_dict
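+
+# Illustrative example:
+#   param_list_to_dict([{'name': 'eth0', 'mtu': 1500}, {'name': 'eth1', 'mtu': 9000}])
+#   -> {'eth0': {'mtu': 1500}, 'eth1': {'mtu': 9000}}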
+
+
+def conditional(expr, val, cast=None):
+ match = re.match(r'^(.+)\((.+)\)$', str(expr), re.I)
+ if match:
+ op, arg = match.groups()
+ else:
+ op = 'eq'
+ if ' ' in str(expr):
+ raise AssertionError('invalid expression: cannot contain spaces')
+ arg = expr
+
+ if cast is None and val is not None:
+ arg = type(val)(arg)
+ elif callable(cast):
+ arg = cast(arg)
+ val = cast(val)
+
+ op = next((oper for alias, oper in ALIASES if op == alias), op)
+
+ if not hasattr(operator, op) and op not in OPERATORS:
+ raise ValueError('unknown operator: %s' % op)
+
+ func = getattr(operator, op)
+ return func(val, arg)
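+
+# Illustrative examples:
+#   conditional('ge(2)', 3)   -> True   (operator.ge(3, 2))
+#   conditional('min(2)', 3)  -> True   ('min' is an alias for 'ge')
+#   conditional('10', 10)     -> True   (bare values default to 'eq')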
+
+
+def ternary(value, true_val, false_val):
+ ''' value ? true_val : false_val '''
+ if value:
+ return true_val
+ else:
+ return false_val
+
+
+def remove_default_spec(spec):
+ for item in spec:
+ if 'default' in spec[item]:
+ del spec[item]['default']
+
+
+def validate_ip_address(address):
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count('.') == 3
+
+
+def validate_ip_v6_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error:
+ return False
+ return True
+
+
+def validate_prefix(prefix):
+ if prefix and not 0 <= int(prefix) <= 32:
+ return False
+ return True
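+
+# Illustrative examples:
+#   validate_ip_address('192.168.1.1')    -> True
+#   validate_ip_address('192.168.1')      -> False  (fewer than three dots)
+#   validate_ip_v6_address('2001:db8::1') -> True
+#   validate_prefix('24')                 -> True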
+
+
+def load_provider(spec, args):
+ provider = args.get('provider') or {}
+ for key, value in iteritems(spec):
+ if key not in provider:
+ if 'fallback' in value:
+ provider[key] = _fallback(value['fallback'])
+ elif 'default' in value:
+ provider[key] = value['default']
+ else:
+ provider[key] = None
+ if 'authorize' in provider:
+ # Coerce authorize to a boolean if a string has somehow snuck in.
+ provider['authorize'] = boolean(provider['authorize'] or False)
+ args['provider'] = provider
+ return provider
+
+
+def _fallback(fallback):
+ strategy = fallback[0]
+ args = []
+ kwargs = {}
+
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ kwargs = item
+ else:
+ args = item
+ try:
+ return strategy(*args, **kwargs)
+ except basic.AnsibleFallbackNotFound:
+ pass
+
+
+def generate_dict(spec):
+ """
+ Generate dictionary which is in sync with argspec
+
+ :param spec: A dictionary that is the argspec of the module
+ :rtype: A dictionary
+ :returns: A dictionary in sync with argspec with default value
+ """
+ obj = {}
+ if not spec:
+ return obj
+
+ for key, val in iteritems(spec):
+ if 'default' in val:
+ dct = {key: val['default']}
+ elif 'type' in val and val['type'] == 'dict':
+ dct = {key: generate_dict(val['options'])}
+ else:
+ dct = {key: None}
+ obj.update(dct)
+ return obj
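+
+# Illustrative example:
+#   generate_dict({'mtu': {'type': 'int', 'default': 1500}, 'name': {'type': 'str'}})
+#   -> {'mtu': 1500, 'name': None}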
+
+
+def parse_conf_arg(cfg, arg):
+ """
+ Parse config based on argument
+
+ :param cfg: A text string which is a line of configuration.
+ :param arg: A text string which is to be matched.
+ :rtype: A text string
+ :returns: A text string if match is found
+ """
+ match = re.search(r'%s (.+)(\n|$)' % arg, cfg, re.M)
+ if match:
+ result = match.group(1).strip()
+ else:
+ result = None
+ return result
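+
+# Illustrative example:
+#   parse_conf_arg('interface Ethernet1\n mtu 9000\n', 'mtu') -> '9000'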
+
+
+def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str='no'):
+ """
+ Parse config based on command
+
+ :param cfg: A text string which is a line of configuration.
+ :param cmd: A text string which is the command to be matched
+ :param res1: A text string to be returned if the command is present
+ :param res2: A text string to be returned if the negate command
+ is present
+ :param delete_str: A text string to identify the start of the
+ negate command
+ :rtype: A text string
+ :returns: A text string if match is found
+ """
+ match = re.search(r'\n\s+%s(\n|$)' % cmd, cfg)
+ if match:
+ return res1
+ if res2 is not None:
+ match = re.search(r'\n\s+%s %s(\n|$)' % (delete_str, cmd), cfg)
+ if match:
+ return res2
+ return None
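+
+# Illustrative example:
+#   parse_conf_cmd_arg('interface Ethernet1\n  shutdown\n', 'shutdown', 'down', 'up')
+#   -> 'down'   (a '  no shutdown' line would return 'up' instead)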
+
+
+def get_xml_conf_arg(cfg, path, data='text'):
+ """
+ :param cfg: The top level configuration lxml Element tree object
+ :param path: The relative xpath w.r.t to top level element (cfg)
+ to be searched in the xml hierarchy
+ :param data: The type of data to be returned for the matched xml node.
+ Valid values are text, tag, attrib, with default as text.
+ :return: Returns the required type for the matched xml node or else None
+ """
+ match = cfg.xpath(path)
+ if len(match):
+ if data == 'tag':
+ result = getattr(match[0], 'tag')
+ elif data == 'attrib':
+ result = getattr(match[0], 'attrib')
+ else:
+ result = getattr(match[0], 'text')
+ else:
+ result = None
+ return result
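+
+# Illustrative example (cfg is an lxml Element for an already-parsed config tree):
+#   get_xml_conf_arg(cfg, 'interfaces/interface/name')           -> text of the first match
+#   get_xml_conf_arg(cfg, 'interfaces/interface', data='attrib') -> its attribute dict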
+
+
+def remove_empties(cfg_dict):
+ """
+ Generate final config dictionary
+
+ :param cfg_dict: A dictionary parsed in the facts system
+ :rtype: A dictionary
+ :returns: A dictionary by eliminating keys that have null values
+ """
+ final_cfg = {}
+ if not cfg_dict:
+ return final_cfg
+
+ for key, val in iteritems(cfg_dict):
+ dct = None
+ if isinstance(val, dict):
+ child_val = remove_empties(val)
+ if child_val:
+ dct = {key: child_val}
+ elif (isinstance(val, list) and val
+ and all([isinstance(x, dict) for x in val])):
+ child_val = [remove_empties(x) for x in val]
+ if child_val:
+ dct = {key: child_val}
+ elif val not in [None, [], {}, (), '']:
+ dct = {key: val}
+ if dct:
+ final_cfg.update(dct)
+ return final_cfg
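+
+# Illustrative example (0 and False are kept; only "empty" values are dropped):
+#   remove_empties({'a': 0, 'b': None, 'c': '', 'd': {'e': None}})
+#   -> {'a': 0}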
+
+
+def validate_config(spec, data):
+ """
+ Validate the input data against the AnsibleModule spec format
+ :param spec: Ansible argument spec
+ :param data: Data to be validated
+ :return: the validated data, with defaults filled in
+ """
+ params = basic._ANSIBLE_ARGS
+ basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': data}))
+ validated_data = basic.AnsibleModule(spec).params
+ basic._ANSIBLE_ARGS = params
+ return validated_data
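+
+# Illustrative example (defaults are filled in by AnsibleModule):
+#   spec = dict(name=dict(type='str', required=True), mtu=dict(type='int', default=1500))
+#   validate_config(spec, {'name': 'eth0'}) -> {'name': 'eth0', 'mtu': 1500}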
+
+
+def search_obj_in_list(name, lst, key='name'):
+ if not lst:
+ return None
+ else:
+ for item in lst:
+ if item.get(key) == name:
+ return item
+
+
+class Template:
+
+ def __init__(self):
+ if not HAS_JINJA2:
+ raise ImportError("jinja2 is required but does not appear to be installed. "
+ "It can be installed using `pip install jinja2`")
+
+ self.env = Environment(undefined=StrictUndefined)
+ self.env.filters.update({'ternary': ternary})
+
+ def __call__(self, value, variables=None, fail_on_undefined=True):
+ variables = variables or {}
+
+ if not self.contains_vars(value):
+ return value
+
+ try:
+ value = self.env.from_string(value).render(variables)
+ except UndefinedError:
+ if not fail_on_undefined:
+ return None
+ raise
+
+ if value:
+ try:
+ return ast.literal_eval(value)
+ except Exception:
+ return str(value)
+ else:
+ return None
+
+ def contains_vars(self, data):
+ if isinstance(data, string_types):
+ for marker in (self.env.block_start_string, self.env.variable_start_string, self.env.comment_start_string):
+ if marker in data:
+ return True
+ return False
diff --git a/test/support/integration/plugins/module_utils/postgres.py b/test/support/integration/plugins/module_utils/postgres.py
new file mode 100644
index 00000000..63811c30
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/postgres.py
@@ -0,0 +1,330 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
+# Most of this was originally added by other creators in the postgresql_user module.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+psycopg2 = None  # This line is needed for unit tests
+try:
+ import psycopg2
+ import psycopg2.extras  # not imported implicitly by 'import psycopg2'; needed for DictCursor below
+ HAS_PSYCOPG2 = True
+except ImportError:
+ HAS_PSYCOPG2 = False
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from distutils.version import LooseVersion
+
+
+def postgres_common_argument_spec():
+ """
+ Return a dictionary with connection options.
+
+ The options are commonly used by most PostgreSQL modules.
+ """
+ return dict(
+ login_user=dict(default='postgres'),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(default=''),
+ login_unix_socket=dict(default=''),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
+ ca_cert=dict(aliases=['ssl_rootcert']),
+ )
+
+
+def ensure_required_libs(module):
+ """Check required libraries."""
+ if not HAS_PSYCOPG2:
+ module.fail_json(msg=missing_required_lib('psycopg2'))
+
+ if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
+ module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+
+def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
+ """Connect to a PostgreSQL database.
+
+ Return psycopg2 connection object.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ conn_params (dict) -- dictionary with connection parameters
+
+ Kwargs:
+ autocommit (bool) -- commit automatically (default False)
+ fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
+ """
+ ensure_required_libs(module)
+
+ db_connection = None
+ try:
+ db_connection = psycopg2.connect(**conn_params)
+ if autocommit:
+ if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+
+ # Switch role, if specified:
+ if module.params.get('session_role'):
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ try:
+ cursor.execute('SET ROLE "%s"' % module.params['session_role'])
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e))
+ finally:
+ cursor.close()
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least '
+ 'version 8.4 to support sslrootcert')
+
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ except Exception as e:
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ return db_connection
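+
+# Illustrative usage sketch (module is an AnsibleModule built with
+# postgres_common_argument_spec(); get_conn_params is defined later in this file):
+#   conn_params = get_conn_params(module, module.params)
+#   db_connection = connect_to_db(module, conn_params, autocommit=True)
+#   cursor = db_connection.cursor()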
+
+
+def exec_sql(obj, query, query_params=None, ddl=False, add_to_executed=True, dont_exec=False):
+ """Execute SQL.
+
+ Auxiliary function for PostgreSQL user classes.
+
+ Returns a query result if possible or True/False if ddl=True arg was passed.
+ It is necessary for statements that don't return any result (like DDL queries).
+
+ Args:
+ obj (obj) -- must be an object of a user class.
+ The object must have module (AnsibleModule class object) and
+ cursor (psycopg cursor object) attributes
+ query (str) -- SQL query to execute
+
+ Kwargs:
+ query_params (dict or tuple) -- Query parameters to prevent SQL injections,
+ could be a dict or tuple
+ ddl (bool) -- must return True or False instead of rows (typical for DDL queries)
+ (default False)
+ add_to_executed (bool) -- append the query to obj.executed_queries attribute
+ dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
+ to obj.executed_queries list and return True (default False)
+ """
+
+ if dont_exec:
+ # This is usually needed to return queries in check_mode
+ # without execution
+ query = obj.cursor.mogrify(query, query_params)
+ if add_to_executed:
+ obj.executed_queries.append(query)
+
+ return True
+
+ try:
+ if query_params is not None:
+ obj.cursor.execute(query, query_params)
+ else:
+ obj.cursor.execute(query)
+
+ if add_to_executed:
+ if query_params is not None:
+ obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
+ else:
+ obj.executed_queries.append(query)
+
+ if not ddl:
+ res = obj.cursor.fetchall()
+ return res
+ return True
+ except Exception as e:
+ obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+ return False
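+
+# Illustrative example (obj is a user-class instance carrying .module, .cursor
+# and .executed_queries attributes, as described above):
+#   exec_sql(obj, 'SELECT rolname FROM pg_roles WHERE rolname = %(name)s',
+#            query_params={'name': 'postgres'})
+#   -> [('postgres',)]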
+
+
+def get_conn_params(module, params_dict, warn_db_default=True):
+ """Get connection parameters from the passed dictionary.
+
+ Return a dictionary with parameters to connect to PostgreSQL server.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ params_dict (dict) -- dictionary with variables
+
+ Kwargs:
+ warn_db_default (bool) -- warn that the default DB is used (default True)
+ """
+ # To use default values, keyword arguments must be absent, so
+ # check which values are empty and don't include them in the return dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ # Might be different in the modules:
+ if params_dict.get('db'):
+ params_map['db'] = 'database'
+ elif params_dict.get('database'):
+ params_map['database'] = 'database'
+ elif params_dict.get('login_db'):
+ params_map['login_db'] = 'database'
+ else:
+ if warn_db_default:
+ module.warn('Database name has not been passed, '
+ 'using the default database to connect to.')
+
+ kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
+ if k in params_map and v != '' and v is not None)
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
+ if is_localhost and params_dict["login_unix_socket"] != "":
+ kw["host"] = params_dict["login_unix_socket"]
+
+ return kw
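+
+# Illustrative example (params_dict must contain a 'login_unix_socket' key;
+# module.warn is also called here because no database name was passed):
+#   get_conn_params(module, {'login_user': 'admin', 'login_password': 's3cret',
+#                            'port': 5432, 'login_unix_socket': ''})
+#   -> {'user': 'admin', 'password': 's3cret', 'port': 5432}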
+
+
+class PgMembership(object):
+ def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
+ self.module = module
+ self.cursor = cursor
+ self.target_roles = [r.strip() for r in target_roles]
+ self.groups = [r.strip() for r in groups]
+ self.executed_queries = []
+ self.granted = {}
+ self.revoked = {}
+ self.fail_on_role = fail_on_role
+ self.non_existent_roles = []
+ self.changed = False
+ self.__check_roles_exist()
+
+ def grant(self):
+ for group in self.groups:
+ self.granted[group] = []
+
+ for role in self.target_roles:
+ # If role is in a group now, pass:
+ if self.__check_membership(group, role):
+ continue
+
+ query = 'GRANT "%s" TO "%s"' % (group, role)
+ self.changed = exec_sql(self, query, ddl=True)
+
+ if self.changed:
+ self.granted[group].append(role)
+
+ return self.changed
+
+ def revoke(self):
+ for group in self.groups:
+ self.revoked[group] = []
+
+ for role in self.target_roles:
+ # If role is not in a group now, pass:
+ if not self.__check_membership(group, role):
+ continue
+
+ query = 'REVOKE "%s" FROM "%s"' % (group, role)
+ self.changed = exec_sql(self, query, ddl=True)
+
+ if self.changed:
+ self.revoked[group].append(role)
+
+ return self.changed
+
+ def __check_membership(self, src_role, dst_role):
+ query = ("SELECT ARRAY(SELECT b.rolname FROM "
+ "pg_catalog.pg_auth_members m "
+ "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(dst_role)s")
+
+ res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False)
+ membership = []
+ if res:
+ membership = res[0][0]
+
+ if not membership:
+ return False
+
+ if src_role in membership:
+ return True
+
+ return False
+
+ def __check_roles_exist(self):
+ existent_groups = self.__roles_exist(self.groups)
+ existent_roles = self.__roles_exist(self.target_roles)
+
+ for group in self.groups:
+ if group not in existent_groups:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % group)
+ else:
+ self.module.warn("Role %s does not exist, pass" % group)
+ self.non_existent_roles.append(group)
+
+ for role in self.target_roles:
+ if role not in existent_roles:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % role)
+ else:
+ self.module.warn("Role %s does not exist, pass" % role)
+
+ if role not in self.groups:
+ self.non_existent_roles.append(role)
+
+ else:
+ if self.fail_on_role:
+ self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role))
+ else:
+ self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role))
+
+ # Update role lists, excluding non existent roles:
+ self.groups = [g for g in self.groups if g not in self.non_existent_roles]
+
+ self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]
+
+ def __roles_exist(self, roles):
+ tmp = ["'" + x + "'" for x in roles]
+ query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
+ return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
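+
+
+# Illustrative usage sketch (cursor comes from a connection made via connect_to_db above):
+#   membership = PgMembership(module, cursor, groups=['readers'], target_roles=['alice'])
+#   changed = membership.grant()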
diff --git a/test/support/integration/plugins/module_utils/rabbitmq.py b/test/support/integration/plugins/module_utils/rabbitmq.py
new file mode 100644
index 00000000..cf764006
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/rabbitmq.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Jorge Rodriguez <jorge.rodriguez@tiriel.eu>
+# Copyright: (c) 2018, John Imison <john+github@imison.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six.moves.urllib import parse as urllib_parse
+from mimetypes import MimeTypes
+
+import os
+import json
+import traceback
+
+PIKA_IMP_ERR = None
+try:
+ import pika
+ import pika.exceptions
+ from pika import spec
+ HAS_PIKA = True
+except ImportError:
+ PIKA_IMP_ERR = traceback.format_exc()
+ HAS_PIKA = False
+
+
+def rabbitmq_argument_spec():
+ return dict(
+ login_user=dict(type='str', default='guest'),
+ login_password=dict(type='str', default='guest', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='str', default='15672'),
+ login_protocol=dict(type='str', default='http', choices=['http', 'https']),
+ ca_cert=dict(type='path', aliases=['cacert']),
+ client_cert=dict(type='path', aliases=['cert']),
+ client_key=dict(type='path', aliases=['key']),
+ vhost=dict(type='str', default='/'),
+ )
+
+
+# notification/rabbitmq_basic_publish.py
+class RabbitClient():
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.check_required_library()
+ self.check_host_params()
+ self.url = self.params['url']
+ self.proto = self.params['proto']
+ self.username = self.params['username']
+ self.password = self.params['password']
+ self.host = self.params['host']
+ self.port = self.params['port']
+ self.vhost = self.params['vhost']
+ self.queue = self.params['queue']
+ self.headers = self.params['headers']
+ self.cafile = self.params['cafile']
+ self.certfile = self.params['certfile']
+ self.keyfile = self.params['keyfile']
+
+ if self.host is not None:
+ self.build_url()
+
+ if self.cafile is not None:
+ self.append_ssl_certs()
+
+ self.connect_to_rabbitmq()
+
+ def check_required_library(self):
+ if not HAS_PIKA:
+ self.module.fail_json(msg=missing_required_lib("pika"), exception=PIKA_IMP_ERR)
+
+ def check_host_params(self):
+ # Fail if url is specified and other conflicting parameters have been specified
+ if self.params['url'] is not None and any(self.params[k] is not None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
+ self.module.fail_json(msg="url and proto, host, port, vhost, username or password cannot be specified at the same time.")
+
+ # Fail if url not specified and there is a missing parameter to build the url
+ if self.params['url'] is None and any(self.params[k] is None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
+ self.module.fail_json(msg="Connection parameters must be passed via url, or, proto, host, port, vhost, username or password.")
+
+ def append_ssl_certs(self):
+ ssl_options = {}
+ if self.cafile:
+ ssl_options['cafile'] = self.cafile
+ if self.certfile:
+ ssl_options['certfile'] = self.certfile
+ if self.keyfile:
+ ssl_options['keyfile'] = self.keyfile
+
+ self.url = self.url + '?ssl_options=' + urllib_parse.quote(json.dumps(ssl_options))
+
+ @staticmethod
+ def rabbitmq_argument_spec():
+ return dict(
+ url=dict(type='str'),
+ proto=dict(type='str', choices=['amqp', 'amqps']),
+ host=dict(type='str'),
+ port=dict(type='int'),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ vhost=dict(type='str'),
+ queue=dict(type='str')
+ )
+
+ # Consider some file size limits here
+ def _read_file(self, path):
+ try:
+ with open(path, "rb") as file_handle:
+ return file_handle.read()
+ except IOError as e:
+ self.module.fail_json(msg="Unable to open file %s: %s" % (path, to_native(e)))
+
+ @staticmethod
+ def _check_file_mime_type(path):
+ mime = MimeTypes()
+ return mime.guess_type(path)
+
+ def build_url(self):
+ self.url = '{0}://{1}:{2}@{3}:{4}/{5}'.format(self.proto,
+ self.username,
+ self.password,
+ self.host,
+ self.port,
+ self.vhost)
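+ # Illustrative result: 'amqp://guest:guest@localhost:5672/myvhost'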
+
+ def connect_to_rabbitmq(self):
+ """
+ Function to connect to rabbitmq using username and password
+ """
+ try:
+ parameters = pika.URLParameters(self.url)
+ except Exception as e:
+ self.module.fail_json(msg="URL malformed: %s" % to_native(e))
+
+ try:
+ self.connection = pika.BlockingConnection(parameters)
+ except Exception as e:
+ self.module.fail_json(msg="Connection issue: %s" % to_native(e))
+
+ try:
+ self.conn_channel = self.connection.channel()
+ except pika.exceptions.AMQPChannelError as e:
+ self.close_connection()
+ self.module.fail_json(msg="Channel issue: %s" % to_native(e))
+
+ def close_connection(self):
+ try:
+ self.connection.close()
+ except pika.exceptions.AMQPConnectionError:
+ pass
+
+ def basic_publish(self):
+ self.content_type = self.params.get("content_type")
+
+ if self.params.get("body") is not None:
+ args = dict(
+ body=self.params.get("body"),
+ exchange=self.params.get("exchange"),
+ routing_key=self.params.get("routing_key"),
+ properties=pika.BasicProperties(content_type=self.content_type, delivery_mode=1, headers=self.headers))
+
+ # If src (file) is defined and content_type is left as default, do a mime lookup on the file
+ if self.params.get("src") is not None and self.content_type == 'text/plain':
+ self.content_type = RabbitClient._check_file_mime_type(self.params.get("src"))[0]
+ self.headers.update(
+ filename=os.path.basename(self.params.get("src"))
+ )
+
+ args = dict(
+ body=self._read_file(self.params.get("src")),
+ exchange=self.params.get("exchange"),
+ routing_key=self.params.get("routing_key"),
+ properties=pika.BasicProperties(content_type=self.content_type,
+ delivery_mode=1,
+ headers=self.headers
+ ))
+ elif self.params.get("src") is not None:
+ args = dict(
+ body=self._read_file(self.params.get("src")),
+ exchange=self.params.get("exchange"),
+ routing_key=self.params.get("routing_key"),
+ properties=pika.BasicProperties(content_type=self.content_type,
+ delivery_mode=1,
+ headers=self.headers
+ ))
+
+ try:
+ # If queue is not defined, RabbitMQ will return the queue name of the automatically generated queue.
+ if self.queue is None:
+ result = self.conn_channel.queue_declare(durable=self.params.get("durable"),
+ exclusive=self.params.get("exclusive"),
+ auto_delete=self.params.get("auto_delete"))
+ self.conn_channel.confirm_delivery()
+ self.queue = result.method.queue
+ else:
+ self.conn_channel.queue_declare(queue=self.queue,
+ durable=self.params.get("durable"),
+ exclusive=self.params.get("exclusive"),
+ auto_delete=self.params.get("auto_delete"))
+ self.conn_channel.confirm_delivery()
+ except Exception as e:
+ self.module.fail_json(msg="Queue declare issue: %s" % to_native(e))
+
+ # https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/cloudstack.py#L150
+ if args['routing_key'] is None:
+ args['routing_key'] = self.queue
+
+ if args['exchange'] is None:
+ args['exchange'] = ''
+
+ try:
+ self.conn_channel.basic_publish(**args)
+ return True
+ except pika.exceptions.UnroutableError:
+ return False
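+
+
+# Illustrative usage sketch (module params follow RabbitClient.rabbitmq_argument_spec()
+# above, plus the publish-related options referenced in basic_publish):
+#   client = RabbitClient(module)
+#   if client.basic_publish():
+#       client.close_connection()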