author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
commit    a453ac31f3428614cceb99027f8efbdb9258a40b (patch)
tree      f61f87408f32a8511cbd91799f9cececb53e0374 /test/support/integration/plugins/inventory
parent    Initial commit. (diff)
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg. (upstream/2.10.7+merged+base+2.10.8+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'test/support/integration/plugins/inventory')
-rw-r--r--  test/support/integration/plugins/inventory/aws_ec2.py       | 760
-rw-r--r--  test/support/integration/plugins/inventory/docker_swarm.py  | 351
-rw-r--r--  test/support/integration/plugins/inventory/foreman.py       | 295
3 files changed, 1406 insertions(+), 0 deletions(-)
diff --git a/test/support/integration/plugins/inventory/aws_ec2.py b/test/support/integration/plugins/inventory/aws_ec2.py
new file mode 100644
index 00000000..09c42cf9
--- /dev/null
+++ b/test/support/integration/plugins/inventory/aws_ec2.py
@@ -0,0 +1,760 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: aws_ec2
+ plugin_type: inventory
+ short_description: EC2 inventory source
+ requirements:
+ - boto3
+ - botocore
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Get inventory hosts from Amazon Web Services EC2.
+ - Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
+ notes:
+ - If no credentials are provided and the control node has an associated IAM instance profile then the
+ role will be used for authentication.
+ author:
+ - Sloane Hertel (@s-hertel)
+ options:
+ aws_profile:
+ description: The AWS profile
+ type: str
+ aliases: [ boto_profile ]
+ env:
+ - name: AWS_DEFAULT_PROFILE
+ - name: AWS_PROFILE
+ aws_access_key:
+ description: The AWS access key to use.
+ type: str
+ aliases: [ aws_access_key_id ]
+ env:
+ - name: EC2_ACCESS_KEY
+ - name: AWS_ACCESS_KEY
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_key:
+ description: The AWS secret key that corresponds to the access key.
+ type: str
+ aliases: [ aws_secret_access_key ]
+ env:
+ - name: EC2_SECRET_KEY
+ - name: AWS_SECRET_KEY
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_security_token:
+ description: The AWS security token if using temporary access and secret keys.
+ type: str
+ env:
+ - name: EC2_SECURITY_TOKEN
+ - name: AWS_SESSION_TOKEN
+ - name: AWS_SECURITY_TOKEN
+ plugin:
+ description: Token that ensures this is a source file for the plugin.
+ required: True
+ choices: ['aws_ec2']
+ iam_role_arn:
+ description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
+ credentials with enough privilege to perform the AssumeRole action.
+ version_added: '2.9'
+ regions:
+ description:
+ - A list of regions in which to describe EC2 instances.
+ - If empty (the default), this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
+ type: list
+ default: []
+ hostnames:
+ description:
+ - A list in order of precedence for hostname variables.
+ - You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
+ type: list
+ default: []
+ filters:
+ description:
+ - A dictionary of filter value pairs.
+ - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ type: dict
+ default: {}
+ include_extra_api_calls:
+ description:
+ - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
+ - Spot instances may be persistent and instances may have associated events.
+ type: bool
+ default: False
+ version_added: '2.8'
+ strict_permissions:
+ description:
+ - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
+ - You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
+ type: bool
+ default: True
+ use_contrib_script_compatible_sanitization:
+ description:
+ - By default this plugin uses a general group name sanitization to create safe and usable group names for use in Ansible.
+ This option allows you to override that in order to ease migration from the old inventory script, and
+ matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
+ To replicate the behavior of ``replace_dash_in_groups = True`` with constructed groups,
+ you will need to replace hyphens with underscores via the regex_replace filter for those entries.
+ - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
+ otherwise the core engine will just apply the standard sanitization on top.
+ - This is not the default because such names can break certain functionality: group names end up being
+ used as Python identifiers, and not all characters are valid in them.
+ type: bool
+ default: False
+ version_added: '2.8'
+'''
+
+EXAMPLES = '''
+# Minimal example using environment vars or instance role credentials
+# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
+plugin: aws_ec2
+regions:
+ - us-east-1
+
+# Example using filters, ignoring permission errors, and specifying the hostname precedence
+plugin: aws_ec2
+boto_profile: aws_profile
+# Populate inventory with instances in these regions
+regions:
+ - us-east-1
+ - us-east-2
+filters:
+ # All instances with their `Environment` tag set to `dev`
+ tag:Environment: dev
+ # All dev and QA hosts
+ tag:Environment:
+ - dev
+ - qa
+ instance.group-id: sg-xxxxxxxx
+# Ignores 403 errors rather than failing
+strict_permissions: False
+# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
+# inventory_hostname use compose (see example below).
+hostnames:
+ - tag:Name=Tag1,Name=Tag2 # Return specific hosts only
+ - tag:CustomDNSName
+ - dns-name
+ - private-ip-address
+
+# Example using constructed features to create groups and set ansible_host
+plugin: aws_ec2
+regions:
+ - us-east-1
+ - us-west-1
+# keyed_groups may be used to create custom groups
+strict: False
+keyed_groups:
+ # Add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'architecture'
+ # Add hosts to tag_Name_Value groups for each Name/Value tag pair
+ - prefix: tag
+ key: tags
+ # Add hosts to e.g. instance_type_z3_tiny
+ - prefix: instance_type
+ key: instance_type
+ # Create security_groups_sg_abcd1234 group for each SG
+ - key: 'security_groups|json_query("[].group_id")'
+ prefix: 'security_groups'
+ # Create a group for each value of the Application tag
+ - key: tags.Application
+ separator: ''
+ # Create a group per region e.g. aws_region_us_east_2
+ - key: placement.region
+ prefix: aws_region
+ # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
+ - key: tags['Role']
+ prefix: foo
+ parent_group: "project"
+# Set individual variables with compose
+compose:
+ # Use the private IP address to connect to the host
+ # (note: this does not modify inventory_hostname, which is set via I(hostnames))
+ ansible_host: private_ip_address
+'''
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+from ansible.module_utils.six import string_types
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
+
+display = Display()
+
+# Each mapping gives the chain of keys used to walk from a filter name to the
+# corresponding value in boto3's EC2 describe_instances() response.
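+# For example (illustrative value), 'group-id' maps to ('Groups', 'GroupId'),
+# so an instance dict like {'Groups': [{'GroupId': 'sg-0123'}]} resolves to
+# 'sg-0123'; _compile_values() below performs the traversal.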
+
+instance_meta_filter_to_boto_attr = {
+ 'group-id': ('Groups', 'GroupId'),
+ 'group-name': ('Groups', 'GroupName'),
+ 'network-interface.attachment.instance-owner-id': ('OwnerId',),
+ 'owner-id': ('OwnerId',),
+ 'requester-id': ('RequesterId',),
+ 'reservation-id': ('ReservationId',),
+}
+
+instance_data_filter_to_boto_attr = {
+ 'affinity': ('Placement', 'Affinity'),
+ 'architecture': ('Architecture',),
+ 'availability-zone': ('Placement', 'AvailabilityZone'),
+ 'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
+ 'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
+ 'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
+ 'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
+ 'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
+ 'client-token': ('ClientToken',),
+ 'dns-name': ('PublicDnsName',),
+ 'host-id': ('Placement', 'HostId'),
+ 'hypervisor': ('Hypervisor',),
+ 'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
+ 'image-id': ('ImageId',),
+ 'instance-id': ('InstanceId',),
+ 'instance-lifecycle': ('InstanceLifecycle',),
+ 'instance-state-code': ('State', 'Code'),
+ 'instance-state-name': ('State', 'Name'),
+ 'instance-type': ('InstanceType',),
+ 'instance.group-id': ('SecurityGroups', 'GroupId'),
+ 'instance.group-name': ('SecurityGroups', 'GroupName'),
+ 'ip-address': ('PublicIpAddress',),
+ 'kernel-id': ('KernelId',),
+ 'key-name': ('KeyName',),
+ 'launch-index': ('AmiLaunchIndex',),
+ 'launch-time': ('LaunchTime',),
+ 'monitoring-state': ('Monitoring', 'State'),
+ 'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
+ 'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
+ 'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
+ 'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
+ 'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
+ 'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
+ 'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
+ 'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
+ 'network-interface.attachment.instance-id': ('InstanceId',),
+ 'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
+ 'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
+ 'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
+ 'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
+ 'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
+ 'network-interface.description': ('NetworkInterfaces', 'Description'),
+ 'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
+ 'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
+ 'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
+ 'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
+ 'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
+ 'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
+ 'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
+ # 'network-interface.requester-id': (),
+ 'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.status': ('NetworkInterfaces', 'Status'),
+ 'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
+ 'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
+ 'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
+ 'placement-group-name': ('Placement', 'GroupName'),
+ 'platform': ('Platform',),
+ 'private-dns-name': ('PrivateDnsName',),
+ 'private-ip-address': ('PrivateIpAddress',),
+ 'product-code': ('ProductCodes', 'ProductCodeId'),
+ 'product-code.type': ('ProductCodes', 'ProductCodeType'),
+ 'ramdisk-id': ('RamdiskId',),
+ 'reason': ('StateTransitionReason',),
+ 'root-device-name': ('RootDeviceName',),
+ 'root-device-type': ('RootDeviceType',),
+ 'source-dest-check': ('SourceDestCheck',),
+ 'spot-instance-request-id': ('SpotInstanceRequestId',),
+ 'state-reason-code': ('StateReason', 'Code'),
+ 'state-reason-message': ('StateReason', 'Message'),
+ 'subnet-id': ('SubnetId',),
+ 'tag': ('Tags',),
+ 'tag-key': ('Tags',),
+ 'tag-value': ('Tags',),
+ 'tenancy': ('Placement', 'Tenancy'),
+ 'virtualization-type': ('VirtualizationType',),
+ 'vpc-id': ('VpcId',),
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'aws_ec2'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ self.group_prefix = 'aws_ec2_'
+
+ # credentials
+ self.boto_profile = None
+ self.aws_secret_access_key = None
+ self.aws_access_key_id = None
+ self.aws_security_token = None
+ self.iam_role_arn = None
+
+ def _compile_values(self, obj, attr):
+ '''
+ :param obj: A list or dict of instance attributes
+ :param attr: A key
+ :return The value(s) found via the attr
+ '''
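+ # Illustrative example: walking [{'GroupId': 'sg-1'}, {'GroupId': 'sg-2'}]
+ # with attr 'GroupId' yields ['sg-1', 'sg-2']; a single match is unwrapped
+ # and returned as a scalar.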
+ if obj is None:
+ return
+
+ temp_obj = []
+
+ if isinstance(obj, (list, tuple)):
+ for each in obj:
+ value = self._compile_values(each, attr)
+ if value:
+ temp_obj.append(value)
+ else:
+ temp_obj = obj.get(attr)
+
+ has_indexes = isinstance(temp_obj, (list, tuple))
+ if has_indexes and len(temp_obj) == 1:
+ return temp_obj[0]
+
+ return temp_obj
+
+ def _get_boto_attr_chain(self, filter_name, instance):
+ '''
+ :param filter_name: The filter
+ :param instance: instance dict returned by boto3 ec2 describe_instances()
+ '''
+ allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
+ if filter_name not in allowed_filters:
+ raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
+ allowed_filters))
+ if filter_name in instance_data_filter_to_boto_attr:
+ boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
+ else:
+ boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
+
+ instance_value = instance
+ for attribute in boto_attr_list:
+ instance_value = self._compile_values(instance_value, attribute)
+ return instance_value
+
+ def _get_credentials(self):
+ '''
+ :return A dictionary of boto client credentials
+ '''
+ boto_params = {}
+ for credential in (('aws_access_key_id', self.aws_access_key_id),
+ ('aws_secret_access_key', self.aws_secret_access_key),
+ ('aws_session_token', self.aws_security_token)):
+ if credential[1]:
+ boto_params[credential[0]] = credential[1]
+
+ return boto_params
+
+ def _get_connection(self, credentials, region='us-east-1'):
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ return connection
+
+ def _boto3_assume_role(self, credentials, region):
+ """
+ Assume an IAM role passed by iam_role_arn parameter
+
+ :return: a dict containing the credentials of the assumed role
+ """
+
+ iam_role_arn = self.iam_role_arn
+
+ try:
+ sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
+ sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
+ return dict(
+ aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
+ aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
+ aws_session_token=sts_session['Credentials']['SessionToken']
+ )
+ except botocore.exceptions.ClientError as e:
+ raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
+
+ def _boto3_conn(self, regions):
+ '''
+ :param regions: A list of regions in which to create boto3 clients
+
+ Generator that yields a boto3 client and the region
+ '''
+
+ credentials = self._get_credentials()
+ iam_role_arn = self.iam_role_arn
+
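+ # Region discovery cascades through three steps: ask EC2 via
+ # describe_regions(), fall back to the region list bundled with boto3,
+ # and finally require the user to set the 'regions' option.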
+ if not regions:
+ try:
+ # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
+ client = self._get_connection(credentials)
+ resp = client.describe_regions()
+ regions = [x['RegionName'] for x in resp.get('Regions', [])]
+ except botocore.exceptions.NoRegionError:
+ # the above seems to fail depending on the boto3 version; ignore it and try something else
+ pass
+
+ # fallback to local list hardcoded in boto3 if still no regions
+ if not regions:
+ session = boto3.Session()
+ regions = session.get_available_regions('ec2')
+
+ # I give up, now you MUST give me regions
+ if not regions:
+ raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
+
+ for region in regions:
+ connection = self._get_connection(credentials, region)
+ try:
+ if iam_role_arn is not None:
+ assumed_credentials = self._boto3_assume_role(credentials, region)
+ else:
+ assumed_credentials = credentials
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ yield connection, region
+
+ def _get_instances_by_region(self, regions, filters, strict_permissions):
+ '''
+ :param regions: a list of regions in which to describe instances
+ :param filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+ :return A list of instance dictionaries
+ '''
+ all_instances = []
+
+ for connection, region in self._boto3_conn(regions):
+ try:
+ # By default find non-terminated/terminating instances
+ if not any(f['Name'] == 'instance-state-name' for f in filters):
+ filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
+ paginator = connection.get_paginator('describe_instances')
+ reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
+ instances = []
+ for r in reservations:
+ new_instances = r['Instances']
+ for instance in new_instances:
+ instance.update(self._get_reservation_details(r))
+ if self.get_option('include_extra_api_calls'):
+ instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
+ instances.extend(new_instances)
+ except botocore.exceptions.ClientError as e:
+ if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
+ instances = []
+ else:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+ except botocore.exceptions.BotoCoreError as e:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+
+ all_instances.extend(instances)
+
+ return sorted(all_instances, key=lambda x: x['InstanceId'])
+
+ def _get_reservation_details(self, reservation):
+ return {
+ 'OwnerId': reservation['OwnerId'],
+ 'RequesterId': reservation.get('RequesterId', ''),
+ 'ReservationId': reservation['ReservationId']
+ }
+
+ def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
+ host_vars = {'Events': '', 'Persistent': False}
+ try:
+ kwargs = {'InstanceIds': [instance_id]}
+ host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if self.get_option('strict_permissions'):
+ raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
+ if spot_instance:
+ try:
+ kwargs = {'SpotInstanceRequestIds': [spot_instance]}
+ host_vars['Persistent'] = bool(
+ connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if self.get_option('strict_permissions'):
+ raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
+ return host_vars
+
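+ # For example, a hostnames entry of 'tag:Name=web,Environment' (hypothetical
+ # tag names) first matches instances whose Name tag equals 'web' (yielding
+ # the hostname 'Name_web'), otherwise falling back to the value of the
+ # Environment tag.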
+ def _get_tag_hostname(self, preference, instance):
+ tag_hostnames = preference.split('tag:', 1)[1]
+ if ',' in tag_hostnames:
+ tag_hostnames = tag_hostnames.split(',')
+ else:
+ tag_hostnames = [tag_hostnames]
+ tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
+ for v in tag_hostnames:
+ if '=' in v:
+ tag_name, tag_value = v.split('=', 1)
+ if tags.get(tag_name) == tag_value:
+ return to_text(tag_name) + "_" + to_text(tag_value)
+ else:
+ tag_value = tags.get(v)
+ if tag_value:
+ return to_text(tag_value)
+ return None
+
+ def _get_hostname(self, instance, hostnames):
+ '''
+ :param instance: an instance dict returned by boto3 ec2 describe_instances()
+ :param hostnames: a list of hostname destination variables in order of preference
+ :return the preferred identifier for the host
+ '''
+ if not hostnames:
+ hostnames = ['dns-name', 'private-dns-name']
+
+ hostname = None
+ for preference in hostnames:
+ if 'tag' in preference:
+ if not preference.startswith('tag:'):
+ raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
+ hostname = self._get_tag_hostname(preference, instance)
+ else:
+ hostname = self._get_boto_attr_chain(preference, instance)
+ if hostname:
+ break
+ if hostname:
+ if ':' in to_text(hostname):
+ return self._sanitize_group_name((to_text(hostname)))
+ else:
+ return to_text(hostname)
+
+ def _query(self, regions, filters, strict_permissions):
+ '''
+ :param regions: a list of regions to query
+ :param filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+ '''
+ return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}
+
+ def _populate(self, groups, hostnames):
+ for group in groups:
+ group = self.inventory.add_group(group)
+ self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
+ self.inventory.add_child('all', group)
+
+ def _add_hosts(self, hosts, group, hostnames):
+ '''
+ :param hosts: a list of hosts to be added to a group
+ :param group: the name of the group to which the hosts belong
+ :param hostnames: a list of hostname destination variables in order of preference
+ '''
+ for host in hosts:
+ hostname = self._get_hostname(host, hostnames)
+
+ host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
+ host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
+
+ # Allow easier grouping by region
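+ # e.g. an availability zone of 'us-east-1a' yields the region 'us-east-1'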
+ host['placement']['region'] = host['placement']['availability_zone'][:-1]
+
+ if not hostname:
+ continue
+ self.inventory.add_host(hostname, group=group)
+ for hostvar, hostval in host.items():
+ self.inventory.set_variable(hostname, hostvar, hostval)
+
+ # Use constructed if applicable
+
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
+
+ def _set_credentials(self):
+ '''
+ Read credentials from the plugin options, falling back to the botocore session.
+ '''
+
+ self.boto_profile = self.get_option('aws_profile')
+ self.aws_access_key_id = self.get_option('aws_access_key')
+ self.aws_secret_access_key = self.get_option('aws_secret_key')
+ self.aws_security_token = self.get_option('aws_security_token')
+ self.iam_role_arn = self.get_option('iam_role_arn')
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ session = botocore.session.get_session()
+ try:
+ credentials = session.get_credentials().get_frozen_credentials()
+ except AttributeError:
+ pass
+ else:
+ self.aws_access_key_id = credentials.access_key
+ self.aws_secret_access_key = credentials.secret_key
+ self.aws_security_token = credentials.token
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
+ "inventory configuration file or set them as environment variables.")
+
+ def verify_file(self, path):
+ '''
+ :param path: the path to the inventory config file
+ :return True if the file name matches the expected pattern, otherwise False
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
+ return True
+ display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self._read_config_data(path)
+
+ if self.get_option('use_contrib_script_compatible_sanitization'):
+ self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
+
+ self._set_credentials()
+
+ # get user specifications
+ regions = self.get_option('regions')
+ filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
+ hostnames = self.get_option('hostnames')
+ strict_permissions = self.get_option('strict_permissions')
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ # Generate inventory
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+
+ if not cache or cache_needs_update:
+ results = self._query(regions, filters, strict_permissions)
+
+ self._populate(results, hostnames)
+
+ # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = results
+
+ @staticmethod
+ def _legacy_script_compatible_group_sanitization(name):
+
+ # note that while this mirrors what the old script did, it has many issues with unicode and usability in Python
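+ # For example, 'my-group.example' becomes 'my-group_example' under this
+ # legacy sanitization (hyphens are preserved; other punctuation becomes '_')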
+ regex = re.compile(r"[^A-Za-z0-9\_\-]")
+
+ return regex.sub('_', name)
+
+
+def ansible_dict_to_boto3_filter_list(filters_dict):
+
+ """ Convert an Ansible dict of filters to list of dicts that boto3 can use
+ Args:
+ filters_dict (dict): Dict of AWS filters.
+ Basic Usage:
+ >>> filters = {'some-aws-id': 'i-01234567'}
+ >>> ansible_dict_to_boto3_filter_list(filters)
+ [{'Name': 'some-aws-id', 'Values': ['i-01234567']}]
+ Returns:
+ List: List of AWS filters and their values
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+ """
+
+ filters_list = []
+ for k, v in filters_dict.items():
+ filter_dict = {'Name': k}
+ if isinstance(v, string_types):
+ filter_dict['Values'] = [v]
+ else:
+ filter_dict['Values'] = v
+
+ filters_list.append(filter_dict)
+
+ return filters_list
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+ """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+ Args:
+ tags_list (list): List of dicts representing AWS tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+ >>> boto3_tag_list_to_ansible_dict(tags_list)
+ {'MyTagKey': 'MyTagValue'}
+ Returns:
+ Dict: Dict of key:value pairs representing AWS tags
+ {
+ 'MyTagKey': 'MyTagValue',
+ }
+ """
+
+ if tag_name_key_name and tag_value_key_name:
+ tag_candidates = {tag_name_key_name: tag_value_key_name}
+ else:
+ tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+ if not tags_list:
+ return {}
+ for k, v in tag_candidates.items():
+ if k in tags_list[0] and v in tags_list[0]:
+ return dict((tag[k], tag[v]) for tag in tags_list)
+ raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
diff --git a/test/support/integration/plugins/inventory/docker_swarm.py b/test/support/integration/plugins/inventory/docker_swarm.py
new file mode 100644
index 00000000..d0a95ca0
--- /dev/null
+++ b/test/support/integration/plugins/inventory/docker_swarm.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_swarm
+ plugin_type: inventory
+ version_added: '2.8'
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
+ requirements:
+ - python >= 2.7
+ - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the Docker swarm API.
+ - Uses a YAML configuration file docker_swarm.[yml|yaml].
+ - "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
+ I(managers) - all manager nodes; I(leader) - the swarm leader node;
+ I(nonleaders) - all nodes except the swarm leader."
+ options:
+ plugin:
+ description: The name of this plugin. It should always be set to C(docker_swarm) for this plugin to
+ recognize it as its own.
+ type: str
+ required: true
+ choices: ['docker_swarm']
+ docker_host:
+ description:
+ - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+ - "Use C(unix://var/run/docker.sock) to connect via local socket."
+ type: str
+ required: true
+ aliases: [ docker_url ]
+ verbose_output:
+ description: Toggle to (not) include all available node metadata (e.g. C(Platform), C(Architecture), C(OS),
+ C(EngineVersion))
+ type: bool
+ default: yes
+ tls:
+ description: Connect using TLS without verifying the authenticity of the Docker host server.
+ type: bool
+ default: no
+ validate_certs:
+ description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
+ host server.
+ type: bool
+ default: no
+ aliases: [ tls_verify ]
+ client_key:
+ description: Path to the client's TLS key file.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ca_cert:
+ description: Use a CA certificate when performing server verification by providing the path to a CA
+ certificate file.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description: Path to the client's TLS certificate file.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ tls_hostname:
+ description: When verifying the authenticity of the Docker host server, provide the expected name of
+ the server.
+ type: str
+ ssl_version:
+ description: Provide a valid SSL version number. Default value determined by ssl.py module.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by docker-py.
+ type: str
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
+ will be used instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ aliases: [ time_out ]
+ include_host_uri:
+ description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
+ swarm leader in the format C(tcp://172.16.0.1:2376). This value may be used without additional
+ modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
+ The port always defaults to C(2376).
+ type: bool
+ default: no
+ include_host_uri_port:
+ description: Override the detected port number included in I(ansible_host_uri)
+ type: int
+'''
+
+EXAMPLES = '''
+# Minimal example using local docker
+plugin: docker_swarm
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote docker
+plugin: docker_swarm
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote docker with unverified TLS
+plugin: docker_swarm
+docker_host: tcp://my-docker-host:2376
+tls: yes
+
+# Example using remote docker with verified TLS and client certificate verification
+plugin: docker_swarm
+docker_host: tcp://my-docker-host:2376
+validate_certs: yes
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups and set ansible_host
+plugin: docker_swarm
+docker_host: tcp://my-docker-host:2375
+strict: False
+keyed_groups:
+ # add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'Description.Platform.Architecture'
+ # add e.g. linux hosts to an os_linux group
+ - prefix: os
+ key: 'Description.Platform.OS'
+ # create a group per node label
+ # e.g. a node labeled w/ "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'Spec.Labels'
+ prefix: label
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.parsing.utils.addresses import parse_address
+
+try:
+ import docker
+ from docker.errors import TLSParameterError
+ from docker.tls import TLSConfig
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+
+def update_tls_hostname(result):
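+ # For example (hypothetical address), a docker_host of
+ # 'tcp://swarm-manager.example:2376' yields a default tls_hostname of
+ # 'swarm-manager.example'.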
+ if result['tls_hostname'] is None:
+ # get default machine name from the url
+ parsed_url = urlparse(result['docker_host'])
+ if ':' in parsed_url.netloc:
+ result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+ else:
+ result['tls_hostname'] = parsed_url.netloc
+
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
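+ # The branches below form a small decision table: host verification
+ # (tls_verify) takes precedence over plain TLS (tls), and client
+ # certificates / a CA certificate are layered in when provided.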
+ if auth['tls'] or auth['tls_verify']:
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and host verification
+ if auth['cacert_path']:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ ca_cert=auth['cacert_path'],
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ else:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cacert_path']:
+ # TLS with cacert only
+ tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+ assert_hostname=auth['tls_hostname'],
+ verify=True,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify']:
+ # TLS with verify and no certs
+ tls_config = _get_tls_config(verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and no host verification
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls']:
+ # TLS with no certs and no host verification
+ tls_config = _get_tls_config(verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ # No TLS
+ return dict(base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker swarm as source. '''
+
+ NAME = 'docker_swarm'
+
+ def _fail(self, msg):
+ raise AnsibleError(msg)
+
+ def _populate(self):
+ raw_params = dict(
+ docker_host=self.get_option('docker_host'),
+ tls=self.get_option('tls'),
+ tls_verify=self.get_option('validate_certs'),
+ key_path=self.get_option('client_key'),
+ cacert_path=self.get_option('ca_cert'),
+ cert_path=self.get_option('client_cert'),
+ tls_hostname=self.get_option('tls_hostname'),
+ api_version=self.get_option('api_version'),
+ timeout=self.get_option('timeout'),
+ ssl_version=self.get_option('ssl_version'),
+ debug=None,
+ )
+ update_tls_hostname(raw_params)
+ connect_params = get_connect_params(raw_params, fail_function=self._fail)
+ self.client = docker.DockerClient(**connect_params)
+ self.inventory.add_group('all')
+ self.inventory.add_group('manager')
+ self.inventory.add_group('worker')
+ self.inventory.add_group('leader')
+ self.inventory.add_group('nonleaders')
+
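+ # Pick the port advertised via ansible_host_uri: an explicit override
+ # wins, otherwise 2376 (TLS) or 2375 (plain) is assumed.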
+ if self.get_option('include_host_uri'):
+ if self.get_option('include_host_uri_port'):
+ host_uri_port = str(self.get_option('include_host_uri_port'))
+ elif self.get_option('tls') or self.get_option('validate_certs'):
+ host_uri_port = '2376'
+ else:
+ host_uri_port = '2375'
+
+ try:
+ self.nodes = self.client.nodes.list()
+ for self.node in self.nodes:
+ self.node_attrs = self.client.nodes.get(self.node.id).attrs
+ self.inventory.add_host(self.node_attrs['ID'])
+ self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
+ self.node_attrs['Status']['Addr'])
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
+ if 'ManagerStatus' in self.node_attrs:
+ if self.node_attrs['ManagerStatus'].get('Leader'):
+ # This is a workaround for a bug in Docker where in some cases the leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
+ swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
+ self.node_attrs['Status']['Addr']
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + swarm_leader_ip + ':' + host_uri_port)
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
+ self.inventory.add_host(self.node_attrs['ID'], group='leader')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
+ to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith((self.NAME + '.yaml', self.NAME + '.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_DOCKER:
+ raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
+ 'https://github.com/docker/docker-py.')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/test/support/integration/plugins/inventory/foreman.py b/test/support/integration/plugins/inventory/foreman.py
new file mode 100644
index 00000000..43073f81
--- /dev/null
+++ b/test/support/integration/plugins/inventory/foreman.py
@@ -0,0 +1,295 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>, Daniel Lobato Garcia <dlobatog@redhat.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: foreman
+ plugin_type: inventory
+ short_description: foreman inventory source
+ version_added: "2.6"
+ requirements:
+ - requests >= 1.1
+ description:
+ - Get inventory hosts from the foreman service.
+ - "Uses a configuration file as an inventory source, it must end in ``.foreman.yml`` or ``.foreman.yaml`` and has a ``plugin: foreman`` entry."
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ options:
+ plugin:
+ description: the name of this plugin, it should always be set to 'foreman' for this plugin to recognize it as its own.
+ required: True
+ choices: ['foreman']
+ url:
+ description: URL of the Foreman server
+ default: 'http://localhost:3000'
+ env:
+ - name: FOREMAN_SERVER
+ version_added: "2.8"
+ user:
+ description: foreman authentication user
+ required: True
+ env:
+ - name: FOREMAN_USER
+ version_added: "2.8"
+ password:
+ description: foreman authentication password
+ required: True
+ env:
+ - name: FOREMAN_PASSWORD
+ version_added: "2.8"
+ validate_certs:
+ description: verify SSL certificate if using https
+ type: boolean
+ default: False
+ group_prefix:
+ description: prefix to apply to foreman groups
+ default: foreman_
+ vars_prefix:
+ description: prefix to apply to host variables; does not apply to facts or params
+ default: foreman_
+ want_facts:
+ description: Toggle, if True the plugin will retrieve host facts from the server
+ type: boolean
+ default: False
+ want_params:
+ description: Toggle, if true the inventory will retrieve 'all_parameters' information as host vars
+ type: boolean
+ default: False
+ want_hostcollections:
+ description: Toggle, if true the plugin will create Ansible groups for host collections
+ type: boolean
+ default: False
+ version_added: '2.10'
+ want_ansible_ssh_host:
+ description: Toggle, if true the plugin will populate the ansible_ssh_host variable to explicitly specify the connection target
+ type: boolean
+ default: False
+ version_added: '2.10'
+
+'''
+
+EXAMPLES = '''
+# my.foreman.yml
+plugin: foreman
+url: http://localhost:2222
+user: ansible-tester
+password: secure
+validate_certs: False
+'''
+
+from distutils.version import LooseVersion
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name, Constructable
+
+# 3rd party imports
+try:
+ import requests
+ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
+ raise ImportError
+except ImportError:
+ raise AnsibleError('This script requires python-requests 1.1 as a minimum version')
+
+from requests.auth import HTTPBasicAuth
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable, Constructable):
+ ''' Host inventory parser for ansible using foreman as source. '''
+
+ NAME = 'foreman'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.foreman_url = None
+
+ self.session = None
+ self.cache_key = None
+ self.use_cache = None
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('foreman.yaml', 'foreman.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "foreman.yaml" or "foreman.yml"')
+ return valid
+
+ def _get_session(self):
+ if not self.session:
+ self.session = requests.session()
+ self.session.auth = HTTPBasicAuth(self.get_option('user'), to_bytes(self.get_option('password')))
+ self.session.verify = self.get_option('validate_certs')
+ return self.session
+
+ def _get_json(self, url, ignore_errors=None):
+
+ if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
+
+ if self.cache_key not in self._cache:
+ self._cache[self.cache_key] = {url: ''}
+
+ results = []
+ s = self._get_session()
+ params = {'page': 1, 'per_page': 250}
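+ # Foreman paginates results: fetch pages of 250 until the accumulated
+ # results reach the reported 'subtotal' (see the exit checks below).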
+ while True:
+ ret = s.get(url, params=params)
+ if ignore_errors and ret.status_code in ignore_errors:
+ break
+ ret.raise_for_status()
+ json = ret.json()
+
+ # process results
+ # FIXME: This assumes the 'return type' matches a specific query;
+ # it will break if we expand the queries and they don't have different types
+ if 'results' not in json:
+ # /hosts/:id does not have a 'results' key
+ results = json
+ break
+ elif isinstance(json['results'], MutableMapping):
+ # /facts are returned as dict in 'results'
+ results = json['results']
+ break
+ else:
+ # /hosts returns 'results' as a paginated list of all hosts
+ results = results + json['results']
+
+ # check for end of paging
+ if len(results) >= json['subtotal']:
+ break
+ if len(json['results']) == 0:
+ self.display.warning("Did not make any progress during loop. expected %d got %d" % (json['subtotal'], len(results)))
+ break
+
+ # get next page
+ params['page'] += 1
+
+ self._cache[self.cache_key][url] = results
+
+ return self._cache[self.cache_key][url]
+
+ def _get_hosts(self):
+ return self._get_json("%s/api/v2/hosts" % self.foreman_url)
+
+ def _get_all_params_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
+ ret = self._get_json(url, [404])
+ if not ret or not isinstance(ret, MutableMapping) or not ret.get('all_parameters', False):
+ return {}
+ return ret.get('all_parameters')
+
+ def _get_facts_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
+ return self._get_json(url)
+
+ def _get_host_data_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
+ return self._get_json(url)
+
+ def _get_facts(self, host):
+ """Fetch all host facts of the host"""
+
+ ret = self._get_facts_by_id(host['id'])
+ if len(ret.values()) == 0:
+ facts = {}
+ elif len(ret.values()) == 1:
+ facts = list(ret.values())[0]
+ else:
+ raise ValueError("More than one set of facts returned for '%s'" % host)
+ return facts
+
+ def _populate(self):
+
+ for host in self._get_hosts():
+
+ if host.get('name'):
+ host_name = self.inventory.add_host(host['name'])
+
+ # create directly mapped groups
+ group_name = host.get('hostgroup_title', host.get('hostgroup_name'))
+ if group_name:
+ group_name = to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group_name.lower().replace(" ", "")))
+ group_name = self.inventory.add_group(group_name)
+ self.inventory.add_child(group_name, host_name)
+
+ # set host vars from host info
+ try:
+ for k, v in host.items():
+ if k not in ('name', 'hostgroup_title', 'hostgroup_name'):
+ try:
+ self.inventory.set_variable(host_name, self.get_option('vars_prefix') + k, v)
+ except ValueError as e:
+ self.display.warning("Could not set host info hostvar for %s, skipping %s: %s" % (host, k, to_text(e)))
+ except ValueError as e:
+ self.display.warning("Could not get host info for %s, skipping: %s" % (host_name, to_text(e)))
+
+ # set host vars from params
+ if self.get_option('want_params'):
+ for p in self._get_all_params_by_id(host['id']):
+ try:
+ self.inventory.set_variable(host_name, p['name'], p['value'])
+ except ValueError as e:
+ self.display.warning("Could not set hostvar %s to '%s' for the '%s' host, skipping: %s" %
+ (p['name'], to_native(p['value']), host, to_native(e)))
+
+ # set host vars from facts
+ if self.get_option('want_facts'):
+ self.inventory.set_variable(host_name, 'foreman_facts', self._get_facts(host))
+
+ # create group for host collections
+ if self.get_option('want_hostcollections'):
+ host_data = self._get_host_data_by_id(host['id'])
+ hostcollections = host_data.get('host_collections')
+ if hostcollections:
+ # Create Ansible groups for host collections
+ for hostcollection in hostcollections:
+ try:
+ hostcollection_group = to_safe_group_name('%shostcollection_%s' % (self.get_option('group_prefix'),
+ hostcollection['name'].lower().replace(" ", "")))
+ hostcollection_group = self.inventory.add_group(hostcollection_group)
+ self.inventory.add_child(hostcollection_group, host_name)
+ except ValueError as e:
+ self.display.warning("Could not create groups for host collections for %s, skipping: %s" % (host_name, to_text(e)))
+
+ # put ansible_ssh_host as hostvar
+ if self.get_option('want_ansible_ssh_host'):
+ for key in ('ip', 'ipv4', 'ipv6'):
+ if host.get(key):
+ try:
+ self.inventory.set_variable(host_name, 'ansible_ssh_host', host[key])
+ break
+ except ValueError as e:
+ self.display.warning("Could not set hostvar ansible_ssh_host to '%s' for the '%s' host, skipping: %s" %
+ (host[key], host_name, to_text(e)))
+
+ strict = self.get_option('strict')
+
+ hostvars = self.inventory.get_host(host_name).get_vars()
+ self._set_composite_vars(self.get_option('compose'), hostvars, host_name, strict)
+ self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host_name, strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host_name, strict)
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # get connection host
+ self.foreman_url = self.get_option('url')
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+
+ # actually populate inventory
+ self._populate()