author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-18 05:52:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-18 05:52:22 +0000
commit     38b7c80217c4e72b1d8988eb1e60bb6e77334114 (patch)
tree       356e9fd3762877d07cde52d21e77070aeff7e789 /ansible_collections/community/aws/plugins/modules
parent     Adding upstream version 7.7.0+dfsg. (diff)
Adding upstream version 9.4.0+dfsg. (tag: upstream/9.4.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/community/aws/plugins/modules')
-rw-r--r--  ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py  82
-rw-r--r--  ansible_collections/community/aws/plugins/modules/acm_certificate.py  203
-rw-r--r--  ansible_collections/community/aws/plugins/modules/acm_certificate_info.py  81
-rw-r--r--  ansible_collections/community/aws/plugins/modules/api_gateway.py  295
-rw-r--r--  ansible_collections/community/aws/plugins/modules/api_gateway_domain.py  165
-rw-r--r--  ansible_collections/community/aws/plugins/modules/api_gateway_info.py  156
-rw-r--r--  ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py  262
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py  45
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py  130
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py  93
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py  370
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py  91
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py  75
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py  145
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_policy.py  335
-rw-r--r--  ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py  123
-rw-r--r--  ansible_collections/community/aws/plugins/modules/aws_region_info.py  98
-rw-r--r--  ansible_collections/community/aws/plugins/modules/batch_compute_environment.py  206
-rw-r--r--  ansible_collections/community/aws/plugins/modules/batch_job_definition.py  151
-rw-r--r--  ansible_collections/community/aws/plugins/modules/batch_job_queue.py  136
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py  57
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py  449
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py  1224
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py  466
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py  127
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py  179
-rw-r--r--  ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py  136
-rw-r--r--  ansible_collections/community/aws/plugins/modules/codebuild_project.py  326
-rw-r--r--  ansible_collections/community/aws/plugins/modules/codecommit_repository.py  75
-rw-r--r--  ansible_collections/community/aws/plugins/modules/codepipeline.py  106
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py  81
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_aggregator.py  162
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_delivery_channel.py  178
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_recorder.py  136
-rw-r--r--  ansible_collections/community/aws/plugins/modules/config_rule.py  216
-rw-r--r--  ansible_collections/community/aws/plugins/modules/data_pipeline.py  279
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py  96
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_connection.py  156
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_gateway.py  172
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py  245
-rw-r--r--  ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py  268
-rw-r--r--  ansible_collections/community/aws/plugins/modules/dms_endpoint.py  278
-rw-r--r--  ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py  94
-rw-r--r--  ansible_collections/community/aws/plugins/modules/dynamodb_table.py  577
-rw-r--r--  ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py  79
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py  106
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway.py  259
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway_info.py  159
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py  154
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py  77
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_launch_template.py  387
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_placement_group.py  161
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py  79
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py  75
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py  210
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py  75
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py  148
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py  69
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py  116
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py  274
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py  138
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py  235
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py  68
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py  287
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py  61
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py  507
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py  72
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ec2_win_password.py  79
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_attribute.py  139
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_cluster.py  197
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_ecr.py  293
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_service.py  517
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_service_info.py  101
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_tag.py  124
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_task.py  319
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py  597
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py  40
-rw-r--r--  ansible_collections/community/aws/plugins/modules/efs.py  409
-rw-r--r--  ansible_collections/community/aws/plugins/modules/efs_info.py  158
-rw-r--r--  ansible_collections/community/aws/plugins/modules/efs_tag.py  98
-rw-r--r--  ansible_collections/community/aws/plugins/modules/eks_cluster.py  140
-rw-r--r--  ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py  164
-rw-r--r--  ansible_collections/community/aws/plugins/modules/eks_nodegroup.py  439
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache.py  345
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache_info.py  85
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py  154
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py  99
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py  85
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py  82
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py  123
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_instance.py  192
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_network_lb.py  152
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_target.py  143
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_target_group.py  465
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_target_group_info.py  93
-rw-r--r--  ansible_collections/community/aws/plugins/modules/elb_target_info.py  314
-rw-r--r--  ansible_collections/community/aws/plugins/modules/glue_connection.py  207
-rw-r--r--  ansible_collections/community/aws/plugins/modules/glue_crawler.py  225
-rw-r--r--  ansible_collections/community/aws/plugins/modules/glue_job.py  203
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_access_key.py  317
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_access_key_info.py  128
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_group.py  433
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_managed_policy.py  371
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py  104
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_password_policy.py  213
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_role.py  736
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_role_info.py  282
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_saml_federation.py  107
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_server_certificate.py  231
-rw-r--r--  ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py  67
-rw-r--r--  ansible_collections/community/aws/plugins/modules/inspector_target.py  117
-rw-r--r--  ansible_collections/community/aws/plugins/modules/kinesis_stream.py  674
-rw-r--r--  ansible_collections/community/aws/plugins/modules/lightsail.py  205
-rw-r--r--  ansible_collections/community/aws/plugins/modules/lightsail_snapshot.py  205
-rw-r--r--  ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py  53
-rw-r--r--  ansible_collections/community/aws/plugins/modules/mq_broker.py  628
-rw-r--r--  ansible_collections/community/aws/plugins/modules/mq_broker_config.py  224
-rw-r--r--  ansible_collections/community/aws/plugins/modules/mq_broker_info.py  121
-rw-r--r--  ansible_collections/community/aws/plugins/modules/mq_user.py  271
-rw-r--r--  ansible_collections/community/aws/plugins/modules/mq_user_info.py  153
-rw-r--r--  ansible_collections/community/aws/plugins/modules/msk_cluster.py  169
-rw-r--r--  ansible_collections/community/aws/plugins/modules/msk_config.py  47
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall.py  112
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_info.py  53
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py  122
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py  49
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py  172
-rw-r--r--  ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py  72
-rw-r--r--  ansible_collections/community/aws/plugins/modules/opensearch.py  473
-rw-r--r--  ansible_collections/community/aws/plugins/modules/opensearch_info.py  80
-rw-r--r--  ansible_collections/community/aws/plugins/modules/redshift.py  397
-rw-r--r--  ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py  97
-rw-r--r--  ansible_collections/community/aws/plugins/modules/redshift_info.py  66
-rw-r--r--  ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py  106
-rw-r--r--  ansible_collections/community/aws/plugins/modules/route53_wait.py  185
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_bucket_info.py  620
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py  203
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_cors.py  58
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_lifecycle.py  297
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_logging.py  106
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py  121
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_sync.py  278
-rw-r--r--  ansible_collections/community/aws/plugins/modules/s3_website.py  121
-rw-r--r--  ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py  185
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ses_identity.py  229
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ses_identity_policy.py  79
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ses_rule_set.py  94
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sns.py  89
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sns_topic.py  326
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sns_topic_info.py  41
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sqs_queue.py  183
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ssm_inventory_info.py  114
-rw-r--r--  ansible_collections/community/aws/plugins/modules/ssm_parameter.py  217
-rw-r--r--  ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py  130
-rw-r--r--  ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py  119
-rw-r--r--  ansible_collections/community/aws/plugins/modules/storagegateway_info.py  103
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sts_assume_role.py  172
-rw-r--r--  ansible_collections/community/aws/plugins/modules/sts_session_token.py  85
-rw-r--r--  ansible_collections/community/aws/plugins/modules/waf_condition.py  437
-rw-r--r--  ansible_collections/community/aws/plugins/modules/waf_info.py  44
-rw-r--r--  ansible_collections/community/aws/plugins/modules/waf_rule.py  241
-rw-r--r--  ansible_collections/community/aws/plugins/modules/waf_web_acl.py  229
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py  152
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py  69
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_resources.py  80
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py  57
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py  182
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py  71
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py  200
-rw-r--r--  ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py  55
170 files changed, 16503 insertions, 17026 deletions
diff --git a/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py b/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py
index e589d0cb0..fab777175 100644
--- a/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py
+++ b/ansible_collections/community/aws/plugins/modules/accessanalyzer_validate_policy_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: accessanalyzer_validate_policy_info
version_added: 5.0.0
@@ -63,19 +61,19 @@ options:
author:
- Mark Chappell (@tremble)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Validate a policy
- name: Validate a simple IAM policy
community.aws.accessanalyzer_validate_policy_info:
policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
-'''
+"""
-RETURN = r'''
+RETURN = r"""
findings:
description: The list of findings in a policy returned by IAM Access Analyzer based on its suite of policy checks.
returned: success
@@ -160,7 +158,7 @@ findings:
description: The offset within the policy that corresponds to the position, starting from C(0).
type: int
returned: success
-'''
+"""
try:
import botocore
@@ -169,8 +167,9 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def filter_findings(findings, type_filter):
@@ -178,11 +177,10 @@ def filter_findings(findings, type_filter):
return findings
# Convert type_filter to the findingType strings returned by the API
- filter_map = dict(error='ERROR', security='SECURITY_WARNING',
- suggestion='SUGGESTION', warning='WARNING')
+ filter_map = dict(error="ERROR", security="SECURITY_WARNING", suggestion="SUGGESTION", warning="WARNING")
allowed_types = [filter_map[t] for t in type_filter]
- filtered_results = [f for f in findings if f.get('findingType', None) in allowed_types]
+ filtered_results = [f for f in findings if f.get("findingType", None) in allowed_types]
return filtered_results
@@ -191,47 +189,47 @@ def main():
# values are likely to be expanded, let's avoid hard coding limits which might not hold true in
# the long term...
argument_spec = dict(
- policy=dict(required=True, type='json', aliases=['policy_document']),
- locale=dict(required=False, type='str', default='EN'),
- policy_type=dict(required=False, type='str', default='identity',
- choices=['identity', 'resource', 'service_control']),
- resource_type=dict(required=False, type='str'),
- results_filter=dict(required=False, type='list', elements='str',
- choices=['error', 'security', 'suggestion', 'warning']),
+ policy=dict(required=True, type="json", aliases=["policy_document"]),
+ locale=dict(required=False, type="str", default="EN"),
+ policy_type=dict(
+ required=False, type="str", default="identity", choices=["identity", "resource", "service_control"]
+ ),
+ resource_type=dict(required=False, type="str"),
+ results_filter=dict(
+ required=False, type="list", elements="str", choices=["error", "security", "suggestion", "warning"]
+ ),
)
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- policy_type_map = dict(identity='IDENTITY_POLICY', resource='RESOURCE_POLICY',
- service_control='SERVICE_CONTROL_POLICY')
+ policy_type_map = dict(
+ identity="IDENTITY_POLICY", resource="RESOURCE_POLICY", service_control="SERVICE_CONTROL_POLICY"
+ )
- policy = module.params.get('policy')
- policy_type = policy_type_map[module.params.get('policy_type')]
- locale = module.params.get('locale').upper()
- resource_type = module.params.get('resource_type')
- results_filter = module.params.get('results_filter')
+ policy = module.params.get("policy")
+ policy_type = policy_type_map[module.params.get("policy_type")]
+ locale = module.params.get("locale").upper()
+ resource_type = module.params.get("resource_type")
+ results_filter = module.params.get("results_filter")
try:
- client = module.client('accessanalyzer', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("accessanalyzer", retry_decorator=AWSRetry.jittered_backoff())
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
params = dict(locale=locale, policyDocument=policy, policyType=policy_type)
- if policy_type == 'RESOURCE_POLICY' and resource_type:
- params['policyType'] = resource_type
+ if policy_type == "RESOURCE_POLICY" and resource_type:
+ params["policyType"] = resource_type
results = client.validate_policy(aws_retry=True, **params)
- findings = filter_findings(results.get('findings', []), results_filter)
- results['findings'] = findings
+ findings = filter_findings(results.get("findings", []), results_filter)
+ results["findings"] = findings
results = camel_dict_to_snake_dict(results)
module.exit_json(changed=False, **results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
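
For reference, the refactored module above is a thin wrapper around the Access Analyzer API. A minimal standalone sketch of the same call with plain boto3 (assuming boto3 is installed and AWS credentials are configured; the sample policy is purely illustrative):

    import json

    import boto3

    client = boto3.client("accessanalyzer")
    policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}],
    })
    # validate_policy reports findings whose findingType is one of ERROR,
    # SECURITY_WARNING, SUGGESTION or WARNING - the same values the module's
    # filter_findings() maps its results_filter choices onto.
    results = client.validate_policy(locale="EN", policyDocument=policy, policyType="IDENTITY_POLICY")
    for finding in results.get("findings", []):
        print(finding["findingType"], finding.get("issueCode"))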
diff --git a/ansible_collections/community/aws/plugins/modules/acm_certificate.py b/ansible_collections/community/aws/plugins/modules/acm_certificate.py
index abdecadcc..0b4f7037a 100644
--- a/ansible_collections/community/aws/plugins/modules/acm_certificate.py
+++ b/ansible_collections/community/aws/plugins/modules/acm_certificate.py
@@ -1,31 +1,14 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-#
+
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
+
# Author:
# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
# on behalf of Telstra Corporation Limited
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: acm_certificate
short_description: Upload and delete certificates in the AWS Certificate Manager service
@@ -175,23 +158,23 @@ notes:
author:
- Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: upload a self-signed certificate
- community.aws.aws_acm:
+ community.aws.acm_certificate:
certificate: "{{ lookup('file', 'cert.pem' ) }}"
privateKey: "{{ lookup('file', 'key.pem' ) }}"
name_tag: my_cert # to be applied through an AWS tag as "Name":"my_cert"
region: ap-southeast-2 # AWS region
- name: create/update a certificate with a chain
- community.aws.aws_acm:
+ community.aws.acm_certificate:
certificate: "{{ lookup('file', 'cert.pem' ) }}"
private_key: "{{ lookup('file', 'key.pem' ) }}"
name_tag: my_cert
@@ -205,34 +188,34 @@ EXAMPLES = '''
var: cert_create.certificate.arn
- name: delete the cert we just created
- community.aws.aws_acm:
+ community.aws.acm_certificate:
name_tag: my_cert
state: absent
region: ap-southeast-2
- name: delete a certificate with a particular ARN
- community.aws.aws_acm:
+ community.aws.acm_certificate:
certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
state: absent
region: ap-southeast-2
- name: delete all certificates with a particular domain name
- community.aws.aws_acm:
+ community.aws.acm_certificate:
domain_name: acm.ansible.com
state: absent
region: ap-southeast-2
- name: add tags to an existing certificate with a particular ARN
- community.aws.aws_acm:
+ community.aws.acm_certificate:
certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
tags:
Name: my_certificate
Application: search
Environment: development
purge_tags: true
-'''
+"""
-RETURN = '''
+RETURN = r"""
certificate:
description: Information about the certificate which was uploaded
type: complex
@@ -255,27 +238,27 @@ arns:
returned: when I(state=absent)
sample:
- "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
-'''
+"""
import base64
-from copy import deepcopy
import re # regex library
+from copy import deepcopy
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
- boto3_tag_list_to_ansible_dict,
- ansible_dict_to_boto3_tag_list,
-)
from ansible.module_utils._text import to_text
+from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
if tags is None:
@@ -293,12 +276,10 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
botocore.exceptions.ClientError,
botocore.exceptions.BotoCoreError,
) as e:
- module.fail_json_aws(
- e, "Couldn't add tags to certificate {0}".format(resource_arn)
- )
+ module.fail_json_aws(e, f"Couldn't add tags to certificate {resource_arn}")
if tags_to_remove and not module.check_mode:
# remove_tags_from_certificate wants a list of key, value pairs, not a list of keys.
- tags_list = [{'Key': key, 'Value': existing_tags.get(key)} for key in tags_to_remove]
+ tags_list = [{"Key": key, "Value": existing_tags.get(key)} for key in tags_to_remove]
try:
client.remove_tags_from_certificate(
CertificateArn=resource_arn,
@@ -308,9 +289,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
botocore.exceptions.ClientError,
botocore.exceptions.BotoCoreError,
) as e:
- module.fail_json_aws(
- e, "Couldn't remove tags from certificate {0}".format(resource_arn)
- )
+ module.fail_json_aws(e, f"Couldn't remove tags from certificate {resource_arn}")
new_tags = deepcopy(existing_tags)
for key, value in tags_to_add.items():
new_tags[key] = value
@@ -325,7 +304,6 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
# May include some lines between each chain in the cert, e.g. "Subject: ..."
# Returns True iff the chains/certs are functionally identical (including chain order)
def chain_compare(module, a, b):
-
chain_a_pem = pem_chain_split(module, a)
chain_b_pem = pem_chain_split(module, b)
@@ -333,7 +311,7 @@ def chain_compare(module, a, b):
return False
# Chain length is the same
- for (ca, cb) in zip(chain_a_pem, chain_b_pem):
+ for ca, cb in zip(chain_a_pem, chain_b_pem):
der_a = PEM_body_to_DER(module, ca)
der_b = PEM_body_to_DER(module, cb)
if der_a != der_b:
@@ -353,7 +331,9 @@ def PEM_body_to_DER(module, pem):
# Store this globally to avoid repeated recompilation
-pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?")
+pem_chain_split_regex = re.compile(
+ r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?"
+)
# Use regex to split up a chain or single cert into an array of base64 encoded data
@@ -361,7 +341,6 @@ pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?(
# Noting that some chains have non-pem data in between each cert
# This function returns only what's between the headers, excluding the headers
def pem_chain_split(module, pem):
-
pem_arr = re.findall(pem_chain_split_regex, to_text(pem))
if len(pem_arr) == 0:
@@ -376,53 +355,55 @@ def update_imported_certificate(client, module, acm, old_cert, desired_tags):
Update the existing certificate that was previously imported in ACM.
"""
module.debug("Existing certificate found in ACM")
- if ('tags' not in old_cert) or ('Name' not in old_cert['tags']):
+ if ("tags" not in old_cert) or ("Name" not in old_cert["tags"]):
# shouldn't happen
module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert)
- if module.params.get('name_tag') is not None and (old_cert['tags']['Name'] != module.params.get('name_tag')):
+ if module.params.get("name_tag") is not None and (old_cert["tags"]["Name"] != module.params.get("name_tag")):
# This could happen if the user identified the certificate using 'certificate_arn' or 'domain_name',
# and the 'Name' tag in the AWS API does not match the ansible 'name_tag'.
module.fail_json(msg="Internal error, Name tag does not match", certificate=old_cert)
- if 'certificate' not in old_cert:
+ if "certificate" not in old_cert:
# shouldn't happen
module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert)
cert_arn = None
# Are the existing certificate in ACM and the local certificate the same?
same = True
- if module.params.get('certificate') is not None:
- same &= chain_compare(module, old_cert['certificate'], module.params['certificate'])
- if module.params['certificate_chain']:
+ if module.params.get("certificate") is not None:
+ same &= chain_compare(module, old_cert["certificate"], module.params["certificate"])
+ if module.params["certificate_chain"]:
# Need to test this
# not sure if Amazon appends the cert itself to the chain when self-signed
- same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain'])
+ same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate_chain"])
else:
# When there is no chain with a cert
# it seems Amazon returns the cert itself as the chain
- same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate'])
+ same &= chain_compare(module, old_cert["certificate_chain"], module.params["certificate"])
if same:
module.debug("Existing certificate in ACM is the same")
- cert_arn = old_cert['certificate_arn']
+ cert_arn = old_cert["certificate_arn"]
changed = False
else:
- absent_args = ['certificate', 'name_tag', 'private_key']
+ absent_args = ["certificate", "name_tag", "private_key"]
if sum([(module.params[a] is not None) for a in absent_args]) < 3:
- module.fail_json(msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified")
+ module.fail_json(
+ msg="When importing a certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified"
+ )
module.debug("Existing certificate in ACM is different, overwriting")
changed = True
if module.check_mode:
- cert_arn = old_cert['certificate_arn']
+ cert_arn = old_cert["certificate_arn"]
# note: returned domain will be the domain of the previous cert
else:
# update cert in ACM
cert_arn = acm.import_certificate(
client,
module,
- certificate=module.params['certificate'],
- private_key=module.params['private_key'],
- certificate_chain=module.params['certificate_chain'],
- arn=old_cert['certificate_arn'],
+ certificate=module.params["certificate"],
+ private_key=module.params["private_key"],
+ certificate_chain=module.params["certificate_chain"],
+ arn=old_cert["certificate_arn"],
tags=desired_tags,
)
return (changed, cert_arn)
@@ -433,22 +414,24 @@ def import_certificate(client, module, acm, desired_tags):
Import a certificate to ACM.
"""
# Validate argument requirements
- absent_args = ['certificate', 'name_tag', 'private_key']
+ absent_args = ["certificate", "name_tag", "private_key"]
cert_arn = None
if sum([(module.params[a] is not None) for a in absent_args]) < 3:
- module.fail_json(msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified")
+ module.fail_json(
+ msg="When importing a new certificate, all of 'name_tag', 'certificate' and 'private_key' must be specified"
+ )
module.debug("No certificate in ACM. Creating new one.")
changed = True
if module.check_mode:
- domain = 'example.com'
+ domain = "example.com"
module.exit_json(certificate=dict(domain_name=domain), changed=True)
else:
cert_arn = acm.import_certificate(
client,
module,
- certificate=module.params['certificate'],
- private_key=module.params['private_key'],
- certificate_chain=module.params['certificate_chain'],
+ certificate=module.params["certificate"],
+ private_key=module.params["private_key"],
+ certificate_chain=module.params["certificate_chain"],
tags=desired_tags,
)
return (changed, cert_arn)
@@ -458,7 +441,7 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags,
cert_arn = None
changed = False
if len(certificates) > 1:
- msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag']
+ msg = f"More than one certificate with Name={module.params['name_tag']} exists in ACM in this region"
module.fail_json(msg=msg, certificates=certificates)
elif len(certificates) == 1:
# Update existing certificate that was previously imported to ACM.
@@ -469,11 +452,13 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags,
# Add/remove tags to/from certificate
try:
- existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_certificate(CertificateArn=cert_arn)['Tags'])
+ existing_tags = boto3_tag_list_to_ansible_dict(
+ client.list_tags_for_certificate(CertificateArn=cert_arn)["Tags"]
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Couldn't get tags for certificate")
- purge_tags = module.params.get('purge_tags')
+ purge_tags = module.params.get("purge_tags")
(c, new_tags) = ensure_tags(client, module, cert_arn, existing_tags, desired_tags, purge_tags)
changed |= c
domain = acm.get_domain_of_cert(client=client, module=module, arn=cert_arn)
@@ -483,21 +468,21 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags,
def ensure_certificates_absent(client, module, acm, certificates):
for cert in certificates:
if not module.check_mode:
- acm.delete_certificate(client, module, cert['certificate_arn'])
- module.exit_json(arns=[cert['certificate_arn'] for cert in certificates], changed=(len(certificates) > 0))
+ acm.delete_certificate(client, module, cert["certificate_arn"])
+ module.exit_json(arns=[cert["certificate_arn"] for cert in certificates], changed=(len(certificates) > 0))
def main():
argument_spec = dict(
certificate=dict(),
- certificate_arn=dict(aliases=['arn']),
+ certificate_arn=dict(aliases=["arn"]),
certificate_chain=dict(),
- domain_name=dict(aliases=['domain']),
- name_tag=dict(aliases=['name']),
+ domain_name=dict(aliases=["domain"]),
+ name_tag=dict(aliases=["name"]),
private_key=dict(no_log=True),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
- state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ state=dict(default="present", choices=["present", "absent"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
@@ -506,62 +491,66 @@ def main():
acm = ACMServiceManager(module)
# Check argument requirements
- if module.params['state'] == 'present':
+ if module.params["state"] == "present":
# at least one of these should be specified.
- absent_args = ['certificate_arn', 'domain_name', 'name_tag']
+ absent_args = ["certificate_arn", "domain_name", "name_tag"]
if sum([(module.params[a] is not None) for a in absent_args]) < 1:
for a in absent_args:
- module.debug("%s is %s" % (a, module.params[a]))
- module.fail_json(msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified")
+ module.debug(f"{a} is {module.params[a]}")
+ module.fail_json(
+ msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified"
+ )
else: # absent
# exactly one of these should be specified
- absent_args = ['certificate_arn', 'domain_name', 'name_tag']
+ absent_args = ["certificate_arn", "domain_name", "name_tag"]
if sum([(module.params[a] is not None) for a in absent_args]) != 1:
for a in absent_args:
- module.debug("%s is %s" % (a, module.params[a]))
- module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified")
+ module.debug(f"{a} is {module.params[a]}")
+ module.fail_json(
+ msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified"
+ )
filter_tags = None
desired_tags = None
- if module.params.get('tags') is not None:
- desired_tags = module.params['tags']
+ if module.params.get("tags") is not None:
+ desired_tags = module.params["tags"]
else:
# Because we're setting the Name tag, we need to explicitly not purge when tags isn't passed
- module.params['purge_tags'] = False
- if module.params.get('name_tag') is not None:
+ module.params["purge_tags"] = False
+ if module.params.get("name_tag") is not None:
# The module was originally implemented to filter certificates based on the 'Name' tag.
# Other tags are not used to filter certificates.
# It would make sense to replace the existing name_tag, domain, certificate_arn attributes
# with a 'filter' attribute, but that would break backwards-compatibility.
- filter_tags = dict(Name=module.params['name_tag'])
+ filter_tags = dict(Name=module.params["name_tag"])
if desired_tags is not None:
- if 'Name' in desired_tags:
- if desired_tags['Name'] != module.params['name_tag']:
+ if "Name" in desired_tags:
+ if desired_tags["Name"] != module.params["name_tag"]:
module.fail_json(msg="Value of 'name_tag' conflicts with value of 'tags.Name'")
else:
- desired_tags['Name'] = module.params['name_tag']
+ desired_tags["Name"] = module.params["name_tag"]
else:
desired_tags = deepcopy(filter_tags)
- client = module.client('acm')
+ client = module.client("acm")
# fetch the list of certificates currently in ACM
certificates = acm.get_certificates(
client=client,
module=module,
- domain_name=module.params['domain_name'],
- arn=module.params['certificate_arn'],
+ domain_name=module.params["domain_name"],
+ arn=module.params["certificate_arn"],
only_tags=filter_tags,
)
- module.debug("Found %d corresponding certificates in ACM" % len(certificates))
- if module.params['state'] == 'present':
+ module.debug(f"Found {len(certificates)} corresponding certificates in ACM")
+ if module.params["state"] == "present":
ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags)
else: # state == absent
ensure_certificates_absent(client, module, acm, certificates)
-if __name__ == '__main__':
+if __name__ == "__main__":
# tests()
main()
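
The chain_compare()/pem_chain_split() logic above decides whether the certificate already stored in ACM matches the local one. A self-contained sketch of the same idea (the regex is copied from the module; the function name and structure here are illustrative, not the module's API):

    import base64
    import re

    # Same pattern the module compiles once at import time.
    PEM_RE = re.compile(
        r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?"
    )

    def same_chain(chain_a, chain_b):
        """True iff two PEM bundles contain the same certificates in the same order."""
        bodies_a = PEM_RE.findall(chain_a)
        bodies_b = PEM_RE.findall(chain_b)
        if len(bodies_a) != len(bodies_b):
            return False
        # Compare DER bytes so line wrapping and text between certificates are
        # ignored (b64decode discards whitespace and other non-alphabet characters).
        return all(
            base64.b64decode(a) == base64.b64decode(b)
            for a, b in zip(bodies_a, bodies_b)
        )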
diff --git a/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py b/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py
index a84d7c0b0..73da208f1 100644
--- a/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py
+++ b/ansible_collections/community/aws/plugins/modules/acm_certificate_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: acm_certificate_info
short_description: Retrieve certificate information from AWS Certificate Manager service
version_added: 1.0.0
@@ -43,26 +41,26 @@ options:
author:
- Will Thames (@willthames)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: obtain all ACM certificates
- community.aws.aws_acm_info:
+ community.aws.acm_certificate_info:
- name: obtain all information for a single ACM certificate
- community.aws.aws_acm_info:
+ community.aws.acm_certificate_info:
domain_name: "*.example_com"
- name: obtain all certificates pending validation
- community.aws.aws_acm_info:
+ community.aws.acm_certificate_info:
statuses:
- - PENDING_VALIDATION
+ - PENDING_VALIDATION
- name: obtain all certificates with tag Name=foo and myTag=bar
- community.aws.aws_acm_info:
+ community.aws.acm_certificate_info:
tags:
Name: foo
myTag: bar
@@ -70,12 +68,11 @@ EXAMPLES = r'''
# The output is still a list of certificates, just one item long.
- name: obtain information about a certificate with a particular ARN
- community.aws.aws_acm_info:
- certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
+ community.aws.acm_certificate_info:
+ certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
+"""
-'''
-
-RETURN = r'''
+RETURN = r"""
certificates:
description: A list of certificates
returned: always
@@ -257,39 +254,51 @@ certificates:
returned: always
sample: AMAZON_ISSUED
type: str
-'''
+"""
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def main():
argument_spec = dict(
- certificate_arn=dict(aliases=['arn']),
- domain_name=dict(aliases=['name']),
+ certificate_arn=dict(aliases=["arn"]),
+ domain_name=dict(aliases=["name"]),
statuses=dict(
- type='list',
- elements='str',
- choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
+ type="list",
+ elements="str",
+ choices=[
+ "PENDING_VALIDATION",
+ "ISSUED",
+ "INACTIVE",
+ "EXPIRED",
+ "VALIDATION_TIMED_OUT",
+ "REVOKED",
+ "FAILED",
+ ],
),
- tags=dict(type='dict'),
+ tags=dict(type="dict"),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
acm_info = ACMServiceManager(module)
- client = module.client('acm')
+ client = module.client("acm")
- certificates = acm_info.get_certificates(client, module,
- domain_name=module.params['domain_name'],
- statuses=module.params['statuses'],
- arn=module.params['certificate_arn'],
- only_tags=module.params['tags'])
+ certificates = acm_info.get_certificates(
+ client,
+ module,
+ domain_name=module.params["domain_name"],
+ statuses=module.params["statuses"],
+ arn=module.params["certificate_arn"],
+ only_tags=module.params["tags"],
+ )
- if module.params['certificate_arn'] and len(certificates) != 1:
- module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn'])
+ if module.params["certificate_arn"] and len(certificates) != 1:
+ module.fail_json(msg=f"No certificate exists in this region with ARN {module.params['certificate_arn']}")
module.exit_json(certificates=certificates)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
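
What the module gathers corresponds roughly to paginating ACM's list_certificates and then describing each match. A hedged boto3 sketch of that flow (the status filter and printed fields are illustrative):

    import boto3

    client = boto3.client("acm")
    paginator = client.get_paginator("list_certificates")
    for page in paginator.paginate(CertificateStatuses=["PENDING_VALIDATION"]):
        for summary in page["CertificateSummaryList"]:
            detail = client.describe_certificate(CertificateArn=summary["CertificateArn"])["Certificate"]
            print(detail["DomainName"], detail["Status"])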
diff --git a/ansible_collections/community/aws/plugins/modules/api_gateway.py b/ansible_collections/community/aws/plugins/modules/api_gateway.py
index a084bf93e..af4432387 100644
--- a/ansible_collections/community/aws/plugins/modules/api_gateway.py
+++ b/ansible_collections/community/aws/plugins/modules/api_gateway.py
@@ -4,11 +4,7 @@
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: api_gateway
version_added: 1.0.0
@@ -102,21 +98,34 @@ options:
choices: ['EDGE', 'REGIONAL', 'PRIVATE']
type: str
default: EDGE
+ name:
+ description:
+ - The name of the RestApi.
+ type: str
+ version_added: 6.2.0
+ lookup:
+ description:
+ - Look up API gateway by either I(tags) (and I(name) if supplied) or by I(api_id).
+ - If I(lookup=tag) and I(tags) is not specified then no lookup for an existing API gateway
+ is performed and a new API gateway will be created.
+ - When using I(lookup=tag), multiple matches being found will result in a failure and no changes will be made.
+ - To change the tags of an API gateway use I(lookup=id).
+ default: tag
+ choices: [ 'tag', 'id' ]
+ type: str
+ version_added: 6.2.0
author:
- 'Michael De La Rue (@mikedlr)'
+notes:
+ - 'Tags are used to uniquely identify API gateway when the I(api_id) is not supplied. version_added=6.2.0'
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
+ - amazon.aws.tags
+"""
-notes:
- - A future version of this module will probably use tags or another
- ID so that an API can be created only once.
- - As an early work around an intermediate version will probably do
- the same using a tag embedded in the API name.
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Setup AWS API Gateway setup on AWS and deploy API definition
community.aws.api_gateway:
swagger_file: my_api.yml
@@ -143,11 +152,22 @@ EXAMPLES = '''
swagger_file: my_api.yml
cache_enabled: true
cache_size: '6.1'
- canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True }
+ canary_settings:
+ percentTraffic: 50.0
+ deploymentId: '123'
+ useStageCache: true
state: present
-'''
-RETURN = '''
+- name: Delete API gateway
+ community.aws.api_gateway:
+ name: ansible-rest-api
+ tags:
+ automation: ansible
+ lookup: tag
+ state: absent
+"""
+
+RETURN = r"""
api_id:
description: API id of the API endpoint created
returned: success
@@ -168,7 +188,7 @@ resource_actions:
returned: always
type: list
sample: ["apigateway:CreateRestApi", "apigateway:CreateDeployment", "apigateway:PutRestApi"]
-'''
+"""
import json
import traceback
@@ -180,70 +200,134 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def main():
argument_spec = dict(
- api_id=dict(type='str', required=False),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']),
- swagger_dict=dict(type='json', default=None),
- swagger_text=dict(type='str', default=None),
- stage=dict(type='str', default=None),
- deploy_desc=dict(type='str', default="Automatic deployment by Ansible."),
- cache_enabled=dict(type='bool', default=False),
- cache_size=dict(type='str', default='0.5', choices=['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']),
- stage_variables=dict(type='dict', default={}),
- stage_canary_settings=dict(type='dict', default={}),
- tracing_enabled=dict(type='bool', default=False),
- endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE'])
+ api_id=dict(type="str", required=False),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ swagger_file=dict(type="path", default=None, aliases=["src", "api_file"]),
+ swagger_dict=dict(type="json", default=None),
+ swagger_text=dict(type="str", default=None),
+ stage=dict(type="str", default=None),
+ deploy_desc=dict(type="str", default="Automatic deployment by Ansible."),
+ cache_enabled=dict(type="bool", default=False),
+ cache_size=dict(type="str", default="0.5", choices=["0.5", "1.6", "6.1", "13.5", "28.4", "58.2", "118", "237"]),
+ stage_variables=dict(type="dict", default={}),
+ stage_canary_settings=dict(type="dict", default={}),
+ tracing_enabled=dict(type="bool", default=False),
+ endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]),
+ name=dict(type="str"),
+ lookup=dict(type="str", choices=["tag", "id"], default="tag"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(default=True, type="bool"),
)
- mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']] # noqa: F841
+ mutually_exclusive = [["swagger_file", "swagger_dict", "swagger_text"]] # noqa: F841
module = AnsibleAWSModule(
argument_spec=argument_spec,
- supports_check_mode=False,
+ supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
)
- api_id = module.params.get('api_id')
- state = module.params.get('state') # noqa: F841
- swagger_file = module.params.get('swagger_file')
- swagger_dict = module.params.get('swagger_dict')
- swagger_text = module.params.get('swagger_text')
- endpoint_type = module.params.get('endpoint_type')
+ api_id = module.params.get("api_id")
+ state = module.params.get("state") # noqa: F841
+ swagger_file = module.params.get("swagger_file")
+ swagger_dict = module.params.get("swagger_dict")
+ swagger_text = module.params.get("swagger_text")
+ endpoint_type = module.params.get("endpoint_type")
+ name = module.params.get("name")
+ tags = module.params.get("tags")
+ lookup = module.params.get("lookup")
- client = module.client('apigateway')
+ client = module.client("apigateway")
- changed = True # for now it will stay that way until we can sometimes avoid change
+ changed = True # for now it will stay that way until we can sometimes avoid change
conf_res = None
dep_res = None
del_res = None
if state == "present":
if api_id is None:
- api_id = create_empty_api(module, client, endpoint_type)
- api_data = get_api_definitions(module, swagger_file=swagger_file,
- swagger_dict=swagger_dict, swagger_text=swagger_text)
- conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data)
+ # lookup API gateway using tags
+ if tags and lookup == "tag":
+ rest_api = get_api_by_tags(client, module, name, tags)
+ if rest_api:
+ api_id = rest_api["id"]
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Create/update operation skipped - running in check mode.")
+ if api_id is None:
+ api_data = get_api_definitions(
+ module, swagger_file=swagger_file, swagger_dict=swagger_dict, swagger_text=swagger_text
+ )
+ # create a new API gateway as none was provided and/or found using lookup=tag
+ api_id = create_empty_api(module, client, name, endpoint_type, tags)
+ conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data)
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ if tags:
+ if not conf_res:
+ conf_res = get_rest_api(module, client, api_id=api_id)
+ tag_changed, tag_result = ensure_apigateway_tags(
+ module, client, api_id=api_id, current_tags=conf_res.get("tags"), new_tags=tags, purge_tags=purge_tags
+ )
+ if tag_changed:
+ changed |= tag_changed
+ conf_res = tag_result
if state == "absent":
+ if api_id is None:
+ if lookup != "tag" or not tags:
+ module.fail_json(
+ msg="An API gateway id must be supplied to delete the API gateway, or tags provided with lookup=tag to identify it."
+ )
+ rest_api = get_api_by_tags(client, module, name, tags)
+ if not rest_api:
+ module.exit_json(changed=False, msg="No API gateway identified with tags provided")
+ api_id = rest_api["id"]
+ elif not describe_api(client, module, api_id):
+ module.exit_json(changed=False, msg=f"API gateway id '{api_id}' does not exist.")
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Delete operation skipped - running in check mode.", api_id=api_id)
+
del_res = delete_rest_api(module, client, api_id)
exit_args = {"changed": changed, "api_id": api_id}
if conf_res is not None:
- exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)
+ exit_args["configure_response"] = camel_dict_to_snake_dict(conf_res)
if dep_res is not None:
- exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)
+ exit_args["deploy_response"] = camel_dict_to_snake_dict(dep_res)
if del_res is not None:
- exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)
+ exit_args["delete_response"] = camel_dict_to_snake_dict(del_res)
module.exit_json(**exit_args)
+def ensure_apigateway_tags(module, client, api_id, current_tags, new_tags, purge_tags):
+ changed = False
+ tag_result = {}
+ tags_to_set, tags_to_delete = compare_aws_tags(current_tags, new_tags, purge_tags)
+ if tags_to_set or tags_to_delete:
+ changed = True
+ apigateway_arn = f"arn:aws:apigateway:{module.region}::/restapis/{api_id}"
+ # Remove tags from Resource
+ if tags_to_delete:
+ client.untag_resource(resourceArn=apigateway_arn, tagKeys=tags_to_delete)
+ # add new tags to resource
+ if tags_to_set:
+ client.tag_resource(resourceArn=apigateway_arn, tags=tags_to_set)
+ # Describe API gateway
+ tag_result = get_rest_api(module, client, api_id=api_id)
+ return changed, tag_result
+
+
def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None):
apidata = None
if swagger_file is not None:
@@ -251,7 +335,7 @@ def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_te
with open(swagger_file) as f:
apidata = f.read()
except OSError as e:
- msg = "Failed trying to read swagger file {0}: {1}".format(str(swagger_file), str(e))
+ msg = f"Failed trying to read swagger file {str(swagger_file)}: {str(e)}"
module.fail_json(msg=msg, exception=traceback.format_exc())
if swagger_dict is not None:
apidata = json.dumps(swagger_dict)
@@ -259,11 +343,20 @@ def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_te
apidata = swagger_text
if apidata is None:
- module.fail_json(msg='module error - no swagger info provided')
+ module.fail_json(msg="module error - no swagger info provided")
return apidata
-def create_empty_api(module, client, endpoint_type):
+def get_rest_api(module, client, api_id):
+ try:
+ response = client.get_rest_api(restApiId=api_id)
+ response.pop("ResponseMetadata", None)
+ return response
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg=f"failed to get REST API with api_id={api_id}")
+
+
+def create_empty_api(module, client, name, endpoint_type, tags):
"""
creates a new empty API ready to be configured. The description is
temporarily set to show the API as incomplete but should be
@@ -271,7 +364,8 @@ def create_empty_api(module, client, endpoint_type):
"""
desc = "Incomplete API creation by ansible api_gateway module"
try:
- awsret = create_api(client, name="ansible-temp-api", description=desc, endpoint_type=endpoint_type)
+ rest_api_name = name or "ansible-temp-api"
+ awsret = create_api(client, name=rest_api_name, description=desc, endpoint_type=endpoint_type, tags=tags)
except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
module.fail_json_aws(e, msg="creating API")
return awsret["id"]
@@ -284,7 +378,7 @@ def delete_rest_api(module, client, api_id):
try:
delete_response = delete_api(client, api_id)
except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
- module.fail_json_aws(e, msg="deleting API {0}".format(api_id))
+ module.fail_json_aws(e, msg=f"deleting API {api_id}")
return delete_response
@@ -301,28 +395,56 @@ def ensure_api_in_correct_state(module, client, api_id, api_data):
configure_response = None
try:
configure_response = configure_api(client, api_id, api_data=api_data)
+ configure_response.pop("ResponseMetadata", None)
except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
- module.fail_json_aws(e, msg="configuring API {0}".format(api_id))
+ module.fail_json_aws(e, msg=f"configuring API {api_id}")
deploy_response = None
- stage = module.params.get('stage')
+ stage = module.params.get("stage")
if stage:
try:
deploy_response = create_deployment(client, api_id, **module.params)
+ deploy_response.pop("ResponseMetadata", None)
except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
- msg = "deploying api {0} to stage {1}".format(api_id, stage)
+ msg = f"deploying api {api_id} to stage {stage}"
module.fail_json_aws(e, msg)
return configure_response, deploy_response
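A rough sketch of the configure-then-deploy sequence above, assuming configure_api wraps boto3's put_rest_api as its mode="overwrite" default suggests:

import boto3

client = boto3.client("apigateway")

def configure_and_deploy(api_id, swagger_body, stage=None):
    # Replace the whole API definition, then snapshot it into a stage.
    client.put_rest_api(restApiId=api_id, mode="overwrite", body=swagger_body)
    if stage:
        return client.create_deployment(restApiId=api_id, stageName=stage)
    return None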
-retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ['TooManyRequestsException']}
+def get_api_by_tags(client, module, name, tags):
+ count = 0
+ result = None
+ for api in list_apis(client):
+ if name and api["name"] != name:
+ continue
+ api_tags = api.get("tags", {})
+ if all((tag_key in api_tags and api_tags[tag_key] == tag_value for tag_key, tag_value in tags.items())):
+ result = api
+ count += 1
+
+ if count > 1:
+ args = "Tags"
+ if name:
+ args += " and name"
+ module.fail_json(msg=f"{args} provided do not identify a unique API gateway")
+ return result
+
+
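Condensed, the lookup above is a subset match on tags plus an optional name filter, failing when more than one API matches; a sketch with hypothetical names:

def find_unique_api(apis, name=None, tags=None):
    # Every requested tag must be present on the API with the same value.
    matches = [
        api for api in apis
        if (name is None or api["name"] == name)
        and all(api.get("tags", {}).get(k) == v for k, v in (tags or {}).items())
    ]
    if len(matches) > 1:
        raise ValueError("Tags (and name) provided do not identify a unique API gateway")
    return matches[0] if matches else None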
+retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ["TooManyRequestsException"]}
@AWSRetry.jittered_backoff(**retry_params)
-def create_api(client, name=None, description=None, endpoint_type=None):
- return client.create_rest_api(name="ansible-temp-api", description=description, endpointConfiguration={'types': [endpoint_type]})
+def create_api(client, name, description=None, endpoint_type=None, tags=None):
+ params = {"name": name}
+ if description:
+ params["description"] = description
+ if endpoint_type:
+ params["endpointConfiguration"] = {"types": [endpoint_type]}
+ if tags:
+ params["tags"] = tags
+ return client.create_rest_api(**params)
@AWSRetry.jittered_backoff(**retry_params)
@@ -337,32 +459,53 @@ def configure_api(client, api_id, api_data=None, mode="overwrite"):
@AWSRetry.jittered_backoff(**retry_params)
def create_deployment(client, rest_api_id, **params):
- canary_settings = params.get('stage_canary_settings')
+ canary_settings = params.get("stage_canary_settings")
if canary_settings and len(canary_settings) > 0:
result = client.create_deployment(
restApiId=rest_api_id,
- stageName=params.get('stage'),
- description=params.get('deploy_desc'),
- cacheClusterEnabled=params.get('cache_enabled'),
- cacheClusterSize=params.get('cache_size'),
- variables=params.get('stage_variables'),
+ stageName=params.get("stage"),
+ description=params.get("deploy_desc"),
+ cacheClusterEnabled=params.get("cache_enabled"),
+ cacheClusterSize=params.get("cache_size"),
+ variables=params.get("stage_variables"),
canarySettings=canary_settings,
- tracingEnabled=params.get('tracing_enabled')
+ tracingEnabled=params.get("tracing_enabled"),
)
else:
result = client.create_deployment(
restApiId=rest_api_id,
- stageName=params.get('stage'),
- description=params.get('deploy_desc'),
- cacheClusterEnabled=params.get('cache_enabled'),
- cacheClusterSize=params.get('cache_size'),
- variables=params.get('stage_variables'),
- tracingEnabled=params.get('tracing_enabled')
+ stageName=params.get("stage"),
+ description=params.get("deploy_desc"),
+ cacheClusterEnabled=params.get("cache_enabled"),
+ cacheClusterSize=params.get("cache_size"),
+ variables=params.get("stage_variables"),
+ tracingEnabled=params.get("tracing_enabled"),
)
return result
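For context, a sketch of the canarySettings payload the first branch forwards to create_deployment; the values here are illustrative:

# Route 10% of traffic to the new deployment and override one stage
# variable for the canary only; useStageCache keeps the canary uncached.
canary_settings = {
    "percentTraffic": 10.0,
    "stageVariableOverrides": {"backend": "canary-target"},
    "useStageCache": False,
}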
-if __name__ == '__main__':
+@AWSRetry.jittered_backoff(**retry_params)
+def list_apis(client):
+ paginator = client.get_paginator("get_rest_apis")
+ return paginator.paginate().build_full_result().get("items", [])
+
+
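build_full_result drains every page before returning; roughly equivalent manual pagination (API Gateway pages on a position token):

def list_rest_apis(client):
    items, position = [], None
    while True:
        kwargs = {"position": position} if position else {}
        page = client.get_rest_apis(**kwargs)
        items.extend(page.get("items", []))
        position = page.get("position")
        if not position:
            return items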
+@AWSRetry.jittered_backoff(**retry_params)
+def describe_api(client, module, rest_api_id):
+ try:
+ response = client.get_rest_api(restApiId=rest_api_id)
+ response.pop("ResponseMetadata")
+ except is_boto3_error_code("ResourceNotFoundException"):
+ response = {}
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Trying to get Rest API '{rest_api_id}'.")
+ return response
+
+
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py b/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py
index 9b4ec8572..8ffbdaf20 100644
--- a/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py
+++ b/ansible_collections/community/aws/plugins/modules/api_gateway_domain.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: api_gateway_domain
short_description: Manage AWS API Gateway custom domains
@@ -57,17 +55,17 @@ options:
default: present
choices: [ 'present', 'absent' ]
type: str
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
notes:
 - Does not create a DNS entry on Route53; for that, use the M(community.aws.route53) module.
 - Only supports TLS certificates from AWS ACM that can be referenced by ARN, while the AWS API still offers (deprecated)
   options to add your own certificates.
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Setup endpoint for a custom domain for your API Gateway HTTP API
community.aws.api_gateway_domain:
domain_name: myapi.foobar.com
@@ -75,7 +73,8 @@ EXAMPLES = '''
security_policy: TLS_1_2
endpoint_type: EDGE
domain_mappings:
- - { rest_api_id: abc123, stage: production }
+ - rest_api_id: abc123
+ stage: production
state: present
register: api_gw_domain_result
@@ -88,9 +87,9 @@ EXAMPLES = '''
zone: foobar.com
alias_hosted_zone_id: "{{ api_gw_domain_result.response.domain.distribution_hosted_zone_id }}"
command: create
-'''
+"""
-RETURN = '''
+RETURN = r"""
response:
description: The data returned by create_domain_name (or update and delete) and create_base_path_mapping methods by boto3.
returned: success
@@ -110,27 +109,33 @@ response:
path_mappings: [
{ base_path: '(empty)', rest_api_id: 'abcd123', stage: 'production' }
]
-'''
+"""
+
+import copy
try:
- from botocore.exceptions import ClientError, BotoCoreError, EndpointConnectionError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import EndpointConnectionError
except ImportError:
pass # caught by imported AnsibleAWSModule
-import copy
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_domain(module, client):
- domain_name = module.params.get('domain_name')
+ domain_name = module.params.get("domain_name")
result = {}
try:
- result['domain'] = get_domain_name(client, domain_name)
- result['path_mappings'] = get_domain_mappings(client, domain_name)
- except is_boto3_error_code('NotFoundException'):
+ result["domain"] = get_domain_name(client, domain_name)
+ result["path_mappings"] = get_domain_mappings(client, domain_name)
+ except is_boto3_error_code("NotFoundException"):
return None
except (ClientError, BotoCoreError, EndpointConnectionError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="getting API GW domain")
@@ -138,28 +143,28 @@ def get_domain(module, client):
def create_domain(module, client):
- path_mappings = module.params.get('domain_mappings', [])
- domain_name = module.params.get('domain_name')
- result = {'domain': {}, 'path_mappings': []}
+ path_mappings = module.params.get("domain_mappings", [])
+ domain_name = module.params.get("domain_name")
+ result = {"domain": {}, "path_mappings": []}
try:
- result['domain'] = create_domain_name(
+ result["domain"] = create_domain_name(
module,
client,
domain_name,
- module.params.get('certificate_arn'),
- module.params.get('endpoint_type'),
- module.params.get('security_policy')
+ module.params.get("certificate_arn"),
+ module.params.get("endpoint_type"),
+ module.params.get("security_policy"),
)
for mapping in path_mappings:
- base_path = mapping.get('base_path', '')
- rest_api_id = mapping.get('rest_api_id')
- stage = mapping.get('stage')
+ base_path = mapping.get("base_path", "")
+ rest_api_id = mapping.get("rest_api_id")
+ stage = mapping.get("stage")
if rest_api_id is None or stage is None:
- module.fail_json('Every domain mapping needs a rest_api_id and stage name')
+ module.fail_json("Every domain mapping needs a rest_api_id and stage name")
- result['path_mappings'].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage))
+ result["path_mappings"].append(add_domain_mapping(client, domain_name, base_path, rest_api_id, stage))
except (ClientError, BotoCoreError, EndpointConnectionError) as e:
module.fail_json_aws(e, msg="creating API GW domain")
@@ -167,54 +172,56 @@ def create_domain(module, client):
def update_domain(module, client, existing_domain):
- domain_name = module.params.get('domain_name')
+ domain_name = module.params.get("domain_name")
result = existing_domain
- result['updated'] = False
+ result["updated"] = False
- domain = existing_domain.get('domain')
+ domain = existing_domain.get("domain")
    # Compare only the relevant set of domain arguments,
    # as get_domain_name gathers all kinds of state information that can't be set anyway.
    # This module also doesn't support custom TLS cert setup params, as they are effectively deprecated and would increase complexity.
existing_domain_settings = {
- 'certificate_arn': domain.get('certificate_arn'),
- 'security_policy': domain.get('security_policy'),
- 'endpoint_type': domain.get('endpoint_configuration').get('types')[0]
+ "certificate_arn": domain.get("certificate_arn"),
+ "security_policy": domain.get("security_policy"),
+ "endpoint_type": domain.get("endpoint_configuration").get("types")[0],
}
specified_domain_settings = {
- 'certificate_arn': module.params.get('certificate_arn'),
- 'security_policy': module.params.get('security_policy'),
- 'endpoint_type': module.params.get('endpoint_type')
+ "certificate_arn": module.params.get("certificate_arn"),
+ "security_policy": module.params.get("security_policy"),
+ "endpoint_type": module.params.get("endpoint_type"),
}
if specified_domain_settings != existing_domain_settings:
try:
- result['domain'] = update_domain_name(client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings))
- result['updated'] = True
+ result["domain"] = update_domain_name(
+ client, domain_name, **snake_dict_to_camel_dict(specified_domain_settings)
+ )
+ result["updated"] = True
except (ClientError, BotoCoreError, EndpointConnectionError) as e:
module.fail_json_aws(e, msg="updating API GW domain")
- existing_mappings = copy.deepcopy(existing_domain.get('path_mappings', []))
+ existing_mappings = copy.deepcopy(existing_domain.get("path_mappings", []))
    # Clean out `base_path: "(none)"` elements from dicts as those won't match with specified mappings
for mapping in existing_mappings:
- if mapping.get('base_path', 'missing') == '(none)':
- mapping.pop('base_path')
+ if mapping.get("base_path", "missing") == "(none)":
+ mapping.pop("base_path")
- specified_mappings = copy.deepcopy(module.params.get('domain_mappings', []))
+ specified_mappings = copy.deepcopy(module.params.get("domain_mappings", []))
    # Clean out `base_path: ""` elements from dicts as those won't match with existing mappings
for mapping in specified_mappings:
- if mapping.get('base_path', 'missing') == '':
- mapping.pop('base_path')
+ if mapping.get("base_path", "missing") == "":
+ mapping.pop("base_path")
if specified_mappings != existing_mappings:
try:
            # When lists mismatch, delete all existing mappings before adding new ones as specified
- for mapping in existing_domain.get('path_mappings', []):
- delete_domain_mapping(client, domain_name, mapping['base_path'])
- for mapping in module.params.get('domain_mappings', []):
- result['path_mappings'] = add_domain_mapping(
- client, domain_name, mapping.get('base_path', ''), mapping.get('rest_api_id'), mapping.get('stage')
+ for mapping in existing_domain.get("path_mappings", []):
+ delete_domain_mapping(client, domain_name, mapping["base_path"])
+ for mapping in module.params.get("domain_mappings", []):
+ result["path_mappings"] = add_domain_mapping(
+ client, domain_name, mapping.get("base_path", ""), mapping.get("rest_api_id"), mapping.get("stage")
)
- result['updated'] = True
+ result["updated"] = True
except (ClientError, BotoCoreError, EndpointConnectionError) as e:
module.fail_json_aws(e, msg="updating API GW domain mapping")
@@ -222,7 +229,7 @@ def update_domain(module, client, existing_domain):
def delete_domain(module, client):
- domain_name = module.params.get('domain_name')
+ domain_name = module.params.get("domain_name")
try:
result = delete_domain_name(client, domain_name)
except (ClientError, BotoCoreError, EndpointConnectionError) as e:
@@ -240,19 +247,19 @@ def get_domain_name(client, domain_name):
@AWSRetry.jittered_backoff(**retry_params)
def get_domain_mappings(client, domain_name):
- return client.get_base_path_mappings(domainName=domain_name, limit=200).get('items', [])
+ return client.get_base_path_mappings(domainName=domain_name, limit=200).get("items", [])
@AWSRetry.jittered_backoff(**retry_params)
def create_domain_name(module, client, domain_name, certificate_arn, endpoint_type, security_policy):
- endpoint_configuration = {'types': [endpoint_type]}
+ endpoint_configuration = {"types": [endpoint_type]}
- if endpoint_type == 'EDGE':
+ if endpoint_type == "EDGE":
return client.create_domain_name(
domainName=domain_name,
certificateArn=certificate_arn,
endpointConfiguration=endpoint_configuration,
- securityPolicy=security_policy
+ securityPolicy=security_policy,
)
else:
# Use regionalCertificateArn for regional domain deploys
@@ -260,13 +267,15 @@ def create_domain_name(module, client, domain_name, certificate_arn, endpoint_ty
domainName=domain_name,
regionalCertificateArn=certificate_arn,
endpointConfiguration=endpoint_configuration,
- securityPolicy=security_policy
+ securityPolicy=security_policy,
)
@AWSRetry.jittered_backoff(**retry_params)
def add_domain_mapping(client, domain_name, base_path, rest_api_id, stage):
- return client.create_base_path_mapping(domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage)
+ return client.create_base_path_mapping(
+ domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage=stage
+ )
@AWSRetry.jittered_backoff(**retry_params)
@@ -294,29 +303,29 @@ def delete_domain_mapping(client, domain_name, base_path):
def main():
argument_spec = dict(
- domain_name=dict(type='str', required=True),
- certificate_arn=dict(type='str', required=True),
- security_policy=dict(type='str', default='TLS_1_2', choices=['TLS_1_0', 'TLS_1_2']),
- endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE']),
- domain_mappings=dict(type='list', required=True, elements='dict'),
- state=dict(type='str', default='present', choices=['present', 'absent'])
+ domain_name=dict(type="str", required=True),
+ certificate_arn=dict(type="str", required=True),
+ security_policy=dict(type="str", default="TLS_1_2", choices=["TLS_1_0", "TLS_1_2"]),
+ endpoint_type=dict(type="str", default="EDGE", choices=["EDGE", "REGIONAL", "PRIVATE"]),
+ domain_mappings=dict(type="list", required=True, elements="dict"),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- supports_check_mode=False
+ supports_check_mode=False,
)
- client = module.client('apigateway')
+ client = module.client("apigateway")
- state = module.params.get('state')
+ state = module.params.get("state")
changed = False
if state == "present":
existing_domain = get_domain(module, client)
if existing_domain is not None:
result = update_domain(module, client, existing_domain)
- changed = result['updated']
+ changed = result["updated"]
else:
result = create_domain(module, client)
changed = True
@@ -327,10 +336,10 @@ def main():
exit_args = {"changed": changed}
if result is not None:
- exit_args['response'] = result
+ exit_args["response"] = result
module.exit_json(**exit_args)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
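The mapping comparison in update_domain above hinges on normalizing the root-path marker; a minimal sketch of that normalization, assuming AWS reports the root mapping as base_path "(none)" while the module accepts "":

def normalize_mappings(mappings, placeholder):
    # Drop the root-path marker so existing ("(none)") and specified ("")
    # mappings compare equal.
    cleaned = []
    for mapping in mappings:
        mapping = dict(mapping)
        if mapping.get("base_path") == placeholder:
            mapping.pop("base_path")
        cleaned.append(mapping)
    return cleaned

existing = normalize_mappings([{"base_path": "(none)", "rest_api_id": "abc123", "stage": "production"}], "(none)")
specified = normalize_mappings([{"rest_api_id": "abc123", "stage": "production"}], "")
assert existing == specified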
diff --git a/ansible_collections/community/aws/plugins/modules/api_gateway_info.py b/ansible_collections/community/aws/plugins/modules/api_gateway_info.py
new file mode 100644
index 000000000..fd38d795a
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/api_gateway_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: api_gateway_info
+version_added: 6.1.0
+short_description: Gather information about API Gateway REST APIs in AWS
+description:
+  - Gather information about API Gateway REST APIs in AWS.
+options:
+ ids:
+ description:
+ - The list of the string identifiers of the associated RestApis.
+ type: list
+ elements: str
+author:
+ - Aubin Bikouo (@abikouo)
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+---
+# List all API gateways
+- name: List all API gateways
+  community.aws.api_gateway_info:
+
+# Get information for a specific API gateway
+- name: Get information for a specific API gateway
+  community.aws.api_gateway_info:
+ ids:
+ - 012345678a
+ - abcdefghij
+"""
+
+RETURN = r"""
+---
+rest_apis:
+  description: A list of API gateways.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description: The name of the API.
+ returned: success
+ type: str
+ sample: 'ansible-tmp-api'
+ id:
+ description: The identifier of the API.
+ returned: success
+ type: str
+ sample: 'abcdefgh'
+ api_key_source:
+ description: The source of the API key for metering requests according to a usage plan.
+ returned: success
+ type: str
+ sample: 'HEADER'
+ created_date:
+ description: The timestamp when the API was created.
+ returned: success
+ type: str
+ sample: "2020-01-01T11:37:59+00:00"
+ description:
+ description: The description of the API.
+ returned: success
+ type: str
+ sample: "Automatic deployment by Ansible."
+ disable_execute_api_endpoint:
+ description: Specifies whether clients can invoke your API by using the default execute-api endpoint.
+ returned: success
+ type: bool
+ sample: False
+ endpoint_configuration:
+ description: The endpoint configuration of this RestApi showing the endpoint types of the API.
+ returned: success
+ type: dict
+ sample: {"types": ["REGIONAL"]}
+ tags:
+ description: The collection of tags.
+ returned: success
+ type: dict
+ sample: {"key": "value"}
+"""
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+
+@AWSRetry.jittered_backoff()
+def _list_rest_apis(connection, **params):
+ paginator = connection.get_paginator("get_rest_apis")
+ return paginator.paginate(**params).build_full_result().get("items", [])
+
+
+@AWSRetry.jittered_backoff()
+def _describe_rest_api(connection, module, rest_api_id):
+ try:
+ response = connection.get_rest_api(restApiId=rest_api_id)
+ response.pop("ResponseMetadata")
+ except is_boto3_error_code("ResourceNotFoundException"):
+ response = {}
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Trying to get Rest API '{rest_api_id}'.")
+ return response
+
+
+def main():
+ argument_spec = dict(
+ ids=dict(type="list", elements="str"),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ connection = module.client("apigateway")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+ ids = module.params.get("ids")
+ if ids:
+ rest_apis = []
+ for rest_api_id in ids:
+ result = _describe_rest_api(connection, module, rest_api_id)
+ if result:
+ rest_apis.append(result)
+ else:
+ rest_apis = _list_rest_apis(connection)
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_rest_apis = [camel_dict_to_snake_dict(item) for item in rest_apis]
+ module.exit_json(changed=False, rest_apis=snaked_rest_apis)
+
+
+if __name__ == "__main__":
+ main()
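camel_dict_to_snake_dict only renames keys (including nested ones); values are left untouched. A small illustration of what the module returns for one API:

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

api = {"id": "abcdefgh", "apiKeySource": "HEADER", "endpointConfiguration": {"types": ["REGIONAL"]}}
print(camel_dict_to_snake_dict(api))
# {'id': 'abcdefgh', 'api_key_source': 'HEADER', 'endpoint_configuration': {'types': ['REGIONAL']}}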
diff --git a/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py b/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py
index d20c107de..beb2247ac 100644
--- a/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py
+++ b/ansible_collections/community/aws/plugins/modules/application_autoscaling_policy.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: application_autoscaling_policy
version_added: 1.0.0
@@ -104,12 +102,12 @@ options:
required: false
type: bool
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create step scaling policy for ECS Service
@@ -160,9 +158,9 @@ EXAMPLES = '''
service_namespace: ecs
resource_id: service/cluster-name/service-name
scalable_dimension: ecs:service:DesiredCount
-'''
+"""
-RETURN = '''
+RETURN = r"""
alarms:
description: List of the CloudWatch alarms associated with the scaling policy
returned: when state present
@@ -283,27 +281,29 @@ creation_time:
returned: when state present
type: str
sample: '2017-09-28T08:22:51.881000-03:00'
-''' # NOQA
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict
+"""
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import _camel_to_snake
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
# Merge the results of the scalable target creation and policy deletion/creation
 # There's no risk in overriding values since shared keys have the same values in our case
def merge_results(scalable_target_result, policy_result):
- if scalable_target_result['changed'] or policy_result['changed']:
+ if scalable_target_result["changed"] or policy_result["changed"]:
changed = True
else:
changed = False
- merged_response = scalable_target_result['response'].copy()
- merged_response.update(policy_result['response'])
+ merged_response = scalable_target_result["response"].copy()
+ merged_response.update(policy_result["response"])
return {"changed": changed, "response": merged_response}
@@ -312,22 +312,22 @@ def delete_scaling_policy(connection, module):
changed = False
try:
scaling_policy = connection.describe_scaling_policies(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- PolicyNames=[module.params.get('policy_name')],
- MaxResults=1
+ ServiceNamespace=module.params.get("service_namespace"),
+ ResourceId=module.params.get("resource_id"),
+ ScalableDimension=module.params.get("scalable_dimension"),
+ PolicyNames=[module.params.get("policy_name")],
+ MaxResults=1,
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe scaling policies")
- if scaling_policy['ScalingPolicies']:
+ if scaling_policy["ScalingPolicies"]:
try:
connection.delete_scaling_policy(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- PolicyName=module.params.get('policy_name'),
+ ServiceNamespace=module.params.get("service_namespace"),
+ ResourceId=module.params.get("resource_id"),
+ ScalableDimension=module.params.get("scalable_dimension"),
+ PolicyName=module.params.get("policy_name"),
)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
@@ -341,11 +341,11 @@ def create_scalable_target(connection, module):
try:
scalable_targets = connection.describe_scalable_targets(
- ServiceNamespace=module.params.get('service_namespace'),
+ ServiceNamespace=module.params.get("service_namespace"),
ResourceIds=[
- module.params.get('resource_id'),
+ module.params.get("resource_id"),
],
- ScalableDimension=module.params.get('scalable_dimension')
+ ScalableDimension=module.params.get("scalable_dimension"),
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe scalable targets")
@@ -353,41 +353,38 @@ def create_scalable_target(connection, module):
# Scalable target registration will occur if:
# 1. There is no scalable target registered for this service
# 2. A scalable target exists, different min/max values are defined and override is set to "yes"
- if (
- not scalable_targets['ScalableTargets']
- or (
- module.params.get('override_task_capacity')
- and (
- scalable_targets['ScalableTargets'][0]['MinCapacity'] != module.params.get('minimum_tasks')
- or scalable_targets['ScalableTargets'][0]['MaxCapacity'] != module.params.get('maximum_tasks')
- )
+ if not scalable_targets["ScalableTargets"] or (
+ module.params.get("override_task_capacity")
+ and (
+ scalable_targets["ScalableTargets"][0]["MinCapacity"] != module.params.get("minimum_tasks")
+ or scalable_targets["ScalableTargets"][0]["MaxCapacity"] != module.params.get("maximum_tasks")
)
):
changed = True
try:
connection.register_scalable_target(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- MinCapacity=module.params.get('minimum_tasks'),
- MaxCapacity=module.params.get('maximum_tasks')
+ ServiceNamespace=module.params.get("service_namespace"),
+ ResourceId=module.params.get("resource_id"),
+ ScalableDimension=module.params.get("scalable_dimension"),
+ MinCapacity=module.params.get("minimum_tasks"),
+ MaxCapacity=module.params.get("maximum_tasks"),
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to register scalable target")
try:
response = connection.describe_scalable_targets(
- ServiceNamespace=module.params.get('service_namespace'),
+ ServiceNamespace=module.params.get("service_namespace"),
ResourceIds=[
- module.params.get('resource_id'),
+ module.params.get("resource_id"),
],
- ScalableDimension=module.params.get('scalable_dimension')
+ ScalableDimension=module.params.get("scalable_dimension"),
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe scalable targets")
- if (response['ScalableTargets']):
- snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0])
+ if response["ScalableTargets"]:
+ snaked_response = camel_dict_to_snake_dict(response["ScalableTargets"][0])
else:
snaked_response = {}
@@ -397,78 +394,82 @@ def create_scalable_target(connection, module):
def create_scaling_policy(connection, module):
try:
scaling_policy = connection.describe_scaling_policies(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- PolicyNames=[module.params.get('policy_name')],
- MaxResults=1
+ ServiceNamespace=module.params.get("service_namespace"),
+ ResourceId=module.params.get("resource_id"),
+ ScalableDimension=module.params.get("scalable_dimension"),
+ PolicyNames=[module.params.get("policy_name")],
+ MaxResults=1,
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe scaling policies")
changed = False
- if scaling_policy['ScalingPolicies']:
- scaling_policy = scaling_policy['ScalingPolicies'][0]
+ if scaling_policy["ScalingPolicies"]:
+ scaling_policy = scaling_policy["ScalingPolicies"][0]
# check if the input parameters are equal to what's already configured
- for attr in ('PolicyName',
- 'ServiceNamespace',
- 'ResourceId',
- 'ScalableDimension',
- 'PolicyType',
- 'StepScalingPolicyConfiguration',
- 'TargetTrackingScalingPolicyConfiguration'):
+ for attr in (
+ "PolicyName",
+ "ServiceNamespace",
+ "ResourceId",
+ "ScalableDimension",
+ "PolicyType",
+ "StepScalingPolicyConfiguration",
+ "TargetTrackingScalingPolicyConfiguration",
+ ):
if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)):
changed = True
scaling_policy[attr] = module.params.get(_camel_to_snake(attr))
else:
changed = True
scaling_policy = {
- 'PolicyName': module.params.get('policy_name'),
- 'ServiceNamespace': module.params.get('service_namespace'),
- 'ResourceId': module.params.get('resource_id'),
- 'ScalableDimension': module.params.get('scalable_dimension'),
- 'PolicyType': module.params.get('policy_type'),
- 'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'),
- 'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration')
+ "PolicyName": module.params.get("policy_name"),
+ "ServiceNamespace": module.params.get("service_namespace"),
+ "ResourceId": module.params.get("resource_id"),
+ "ScalableDimension": module.params.get("scalable_dimension"),
+ "PolicyType": module.params.get("policy_type"),
+ "StepScalingPolicyConfiguration": module.params.get("step_scaling_policy_configuration"),
+ "TargetTrackingScalingPolicyConfiguration": module.params.get(
+ "target_tracking_scaling_policy_configuration"
+ ),
}
if changed:
try:
- if (module.params.get('step_scaling_policy_configuration')):
+ if module.params.get("step_scaling_policy_configuration"):
connection.put_scaling_policy(
- PolicyName=scaling_policy['PolicyName'],
- ServiceNamespace=scaling_policy['ServiceNamespace'],
- ResourceId=scaling_policy['ResourceId'],
- ScalableDimension=scaling_policy['ScalableDimension'],
- PolicyType=scaling_policy['PolicyType'],
- StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration']
+ PolicyName=scaling_policy["PolicyName"],
+ ServiceNamespace=scaling_policy["ServiceNamespace"],
+ ResourceId=scaling_policy["ResourceId"],
+ ScalableDimension=scaling_policy["ScalableDimension"],
+ PolicyType=scaling_policy["PolicyType"],
+ StepScalingPolicyConfiguration=scaling_policy["StepScalingPolicyConfiguration"],
)
- elif (module.params.get('target_tracking_scaling_policy_configuration')):
+ elif module.params.get("target_tracking_scaling_policy_configuration"):
connection.put_scaling_policy(
- PolicyName=scaling_policy['PolicyName'],
- ServiceNamespace=scaling_policy['ServiceNamespace'],
- ResourceId=scaling_policy['ResourceId'],
- ScalableDimension=scaling_policy['ScalableDimension'],
- PolicyType=scaling_policy['PolicyType'],
- TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration']
+ PolicyName=scaling_policy["PolicyName"],
+ ServiceNamespace=scaling_policy["ServiceNamespace"],
+ ResourceId=scaling_policy["ResourceId"],
+ ScalableDimension=scaling_policy["ScalableDimension"],
+ PolicyType=scaling_policy["PolicyType"],
+ TargetTrackingScalingPolicyConfiguration=scaling_policy["TargetTrackingScalingPolicyConfiguration"],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to create scaling policy")
try:
response = connection.describe_scaling_policies(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- PolicyNames=[module.params.get('policy_name')],
- MaxResults=1
+ ServiceNamespace=module.params.get("service_namespace"),
+ ResourceId=module.params.get("resource_id"),
+ ScalableDimension=module.params.get("scalable_dimension"),
+ PolicyNames=[module.params.get("policy_name")],
+ MaxResults=1,
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe scaling policies")
- if (response['ScalingPolicies']):
- snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0])
+ if response["ScalingPolicies"]:
+ snaked_response = camel_dict_to_snake_dict(response["ScalingPolicies"][0])
else:
snaked_response = {}
@@ -477,52 +478,63 @@ def create_scaling_policy(connection, module):
def main():
argument_spec = dict(
- state=dict(type='str', required=True, choices=['present', 'absent']),
- policy_name=dict(type='str', required=True),
- service_namespace=dict(type='str', required=True, choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']),
- resource_id=dict(type='str', required=True),
- scalable_dimension=dict(type='str',
- required=True,
- choices=['ecs:service:DesiredCount',
- 'ec2:spot-fleet-request:TargetCapacity',
- 'elasticmapreduce:instancegroup:InstanceCount',
- 'appstream:fleet:DesiredCapacity',
- 'dynamodb:table:ReadCapacityUnits',
- 'dynamodb:table:WriteCapacityUnits',
- 'dynamodb:index:ReadCapacityUnits',
- 'dynamodb:index:WriteCapacityUnits']),
- policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']),
- step_scaling_policy_configuration=dict(type='dict'),
+ state=dict(type="str", required=True, choices=["present", "absent"]),
+ policy_name=dict(type="str", required=True),
+ service_namespace=dict(
+ type="str", required=True, choices=["appstream", "dynamodb", "ec2", "ecs", "elasticmapreduce"]
+ ),
+ resource_id=dict(type="str", required=True),
+ scalable_dimension=dict(
+ type="str",
+ required=True,
+ choices=[
+ "ecs:service:DesiredCount",
+ "ec2:spot-fleet-request:TargetCapacity",
+ "elasticmapreduce:instancegroup:InstanceCount",
+ "appstream:fleet:DesiredCapacity",
+ "dynamodb:table:ReadCapacityUnits",
+ "dynamodb:table:WriteCapacityUnits",
+ "dynamodb:index:ReadCapacityUnits",
+ "dynamodb:index:WriteCapacityUnits",
+ ],
+ ),
+ policy_type=dict(type="str", required=True, choices=["StepScaling", "TargetTrackingScaling"]),
+ step_scaling_policy_configuration=dict(type="dict"),
target_tracking_scaling_policy_configuration=dict(
- type='dict',
+ type="dict",
options=dict(
- CustomizedMetricSpecification=dict(type='dict'),
- DisableScaleIn=dict(type='bool'),
- PredefinedMetricSpecification=dict(type='dict'),
- ScaleInCooldown=dict(type='int'),
- ScaleOutCooldown=dict(type='int'),
- TargetValue=dict(type='float'),
- )
+ CustomizedMetricSpecification=dict(type="dict"),
+ DisableScaleIn=dict(type="bool"),
+ PredefinedMetricSpecification=dict(type="dict"),
+ ScaleInCooldown=dict(type="int"),
+ ScaleOutCooldown=dict(type="int"),
+ TargetValue=dict(type="float"),
+ ),
),
- minimum_tasks=dict(type='int'),
- maximum_tasks=dict(type='int'),
- override_task_capacity=dict(type='bool'),
+ minimum_tasks=dict(type="int"),
+ maximum_tasks=dict(type="int"),
+ override_task_capacity=dict(type="bool"),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- connection = module.client('application-autoscaling')
+ connection = module.client("application-autoscaling")
# Remove any target_tracking_scaling_policy_configuration suboptions that are None
policy_config_options = [
- 'CustomizedMetricSpecification', 'DisableScaleIn', 'PredefinedMetricSpecification', 'ScaleInCooldown', 'ScaleOutCooldown', 'TargetValue'
+ "CustomizedMetricSpecification",
+ "DisableScaleIn",
+ "PredefinedMetricSpecification",
+ "ScaleInCooldown",
+ "ScaleOutCooldown",
+ "TargetValue",
]
- if isinstance(module.params['target_tracking_scaling_policy_configuration'], dict):
+ if isinstance(module.params["target_tracking_scaling_policy_configuration"], dict):
for option in policy_config_options:
- if module.params['target_tracking_scaling_policy_configuration'][option] is None:
- module.params['target_tracking_scaling_policy_configuration'].pop(option)
+ if module.params["target_tracking_scaling_policy_configuration"][option] is None:
+ module.params["target_tracking_scaling_policy_configuration"].pop(option)
- if module.params.get("state") == 'present':
+ if module.params.get("state") == "present":
# A scalable target must be registered prior to creating a scaling policy
scalable_target_result = create_scalable_target(connection, module)
policy_result = create_scaling_policy(connection, module)
@@ -535,5 +547,5 @@ def main():
module.exit_json(**policy_result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
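The drift check in create_scaling_policy above compares each boto3 CamelCase attribute against the matching snake_case module parameter; condensed into a sketch:

from ansible.module_utils.common.dict_transformations import _camel_to_snake

def policy_changed(existing, params, attrs):
    # Any attribute that differs from its snake_case parameter forces an update.
    return any(
        attr in existing and existing[attr] != params.get(_camel_to_snake(attr))
        for attr in attrs
    )

print(policy_changed({"PolicyType": "StepScaling"}, {"policy_type": "TargetTrackingScaling"}, ("PolicyType",)))  # True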
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py b/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py
index 8f585a102..94a8d031f 100644
--- a/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_complete_lifecycle_action.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: autoscaling_complete_lifecycle_action
short_description: Completes the lifecycle action of an instance
@@ -37,12 +36,12 @@ options:
type: str
required: true
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Complete the lifecycle action
- aws_asg_complete_lifecycle_action:
@@ -50,47 +49,47 @@ EXAMPLES = '''
lifecycle_hook_name: my-lifecycle-hook
lifecycle_action_result: CONTINUE
instance_id: i-123knm1l2312
-'''
+"""
-RETURN = '''
+RETURN = r"""
---
status:
    description: Result of the lifecycle action completion request.
returned: success
type: str
sample: ["OK"]
-'''
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def main():
argument_spec = dict(
- asg_name=dict(required=True, type='str'),
- lifecycle_hook_name=dict(required=True, type='str'),
- lifecycle_action_result=dict(required=True, type='str', choices=['CONTINUE', 'ABANDON']),
- instance_id=dict(required=True, type='str')
+ asg_name=dict(required=True, type="str"),
+ lifecycle_hook_name=dict(required=True, type="str"),
+ lifecycle_action_result=dict(required=True, type="str", choices=["CONTINUE", "ABANDON"]),
+ instance_id=dict(required=True, type="str"),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
- asg_name = module.params.get('asg_name')
- lifecycle_hook_name = module.params.get('lifecycle_hook_name')
- lifecycle_action_result = module.params.get('lifecycle_action_result')
- instance_id = module.params.get('instance_id')
+ asg_name = module.params.get("asg_name")
+ lifecycle_hook_name = module.params.get("lifecycle_hook_name")
+ lifecycle_action_result = module.params.get("lifecycle_action_result")
+ instance_id = module.params.get("instance_id")
- autoscaling = module.client('autoscaling')
+ autoscaling = module.client("autoscaling")
try:
results = autoscaling.complete_lifecycle_action(
LifecycleHookName=lifecycle_hook_name,
AutoScalingGroupName=asg_name,
LifecycleActionResult=lifecycle_action_result,
- InstanceId=instance_id
+ InstanceId=instance_id,
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to completes the lifecycle action")
@@ -98,5 +97,5 @@ def main():
module.exit_json(results=results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
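The module is a thin wrapper over one boto3 call; a direct equivalent using the example values from the documentation above:

import boto3

autoscaling = boto3.client("autoscaling")
# Release the instance from its hook so the Auto Scaling group either
# continues with or abandons the launch/terminate action.
autoscaling.complete_lifecycle_action(
    LifecycleHookName="my-lifecycle-hook",
    AutoScalingGroupName="autoscalinggroup",
    LifecycleActionResult="CONTINUE",
    InstanceId="i-123knm1l2312",
)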
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py
index 94c2bb38c..b301fea94 100644
--- a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: autoscaling_instance_refresh
version_added: 3.2.0
@@ -61,12 +59,12 @@ options:
type: int
type: dict
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Start a refresh
@@ -86,10 +84,9 @@ EXAMPLES = '''
preferences:
min_healthy_percentage: 91
instance_warmup: 60
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
---
instance_refresh_id:
description: instance refresh id
@@ -137,20 +134,22 @@ instances_to_update:
returned: success
type: int
sample: 5
-'''
+"""
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def start_or_cancel_instance_refresh(conn, module):
"""
@@ -179,75 +178,75 @@ def start_or_cancel_instance_refresh(conn, module):
}
"""
- asg_state = module.params.get('state')
- asg_name = module.params.get('name')
- preferences = module.params.get('preferences')
+ asg_state = module.params.get("state")
+ asg_name = module.params.get("name")
+ preferences = module.params.get("preferences")
args = {}
- args['AutoScalingGroupName'] = asg_name
- if asg_state == 'started':
- args['Strategy'] = module.params.get('strategy')
+ args["AutoScalingGroupName"] = asg_name
+ if asg_state == "started":
+ args["Strategy"] = module.params.get("strategy")
if preferences:
- if asg_state == 'cancelled':
- module.fail_json(msg='can not pass preferences dict when canceling a refresh')
+ if asg_state == "cancelled":
+ module.fail_json(msg="can not pass preferences dict when canceling a refresh")
_prefs = scrub_none_parameters(preferences)
- args['Preferences'] = snake_dict_to_camel_dict(_prefs, capitalize_first=True)
+ args["Preferences"] = snake_dict_to_camel_dict(_prefs, capitalize_first=True)
cmd_invocations = {
- 'cancelled': conn.cancel_instance_refresh,
- 'started': conn.start_instance_refresh,
+ "cancelled": conn.cancel_instance_refresh,
+ "started": conn.start_instance_refresh,
}
try:
if module.check_mode:
- if asg_state == 'started':
- ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', '[]')
+ if asg_state == "started":
+ ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get(
+ "InstanceRefreshes", "[]"
+ )
if ongoing_refresh:
- module.exit_json(changed=False, msg='In check_mode - Instance Refresh is already in progress, can not start new instance refresh.')
+ module.exit_json(
+ changed=False,
+ msg="In check_mode - Instance Refresh is already in progress, can not start new instance refresh.",
+ )
else:
- module.exit_json(changed=True, msg='Would have started instance refresh if not in check mode.')
- elif asg_state == 'cancelled':
- ongoing_refresh = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get('InstanceRefreshes', '[]')[0]
- if ongoing_refresh.get('Status', '') in ['Cancelling', 'Cancelled']:
- module.exit_json(changed=False, msg='In check_mode - Instance Refresh already cancelled or is pending cancellation.')
+ module.exit_json(changed=True, msg="Would have started instance refresh if not in check mode.")
+ elif asg_state == "cancelled":
+                ongoing_refreshes = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name).get(
+                    "InstanceRefreshes", []
+                )
+                ongoing_refresh = ongoing_refreshes[0] if ongoing_refreshes else {}
+                if ongoing_refresh.get("Status", "") in ["Cancelling", "Cancelled"]:
+ module.exit_json(
+ changed=False,
+ msg="In check_mode - Instance Refresh already cancelled or is pending cancellation.",
+ )
elif not ongoing_refresh:
- module.exit_json(chaned=False, msg='In check_mode - No active referesh found, nothing to cancel.')
+                    module.exit_json(changed=False, msg="In check_mode - No active refresh found, nothing to cancel.")
else:
- module.exit_json(changed=True, msg='Would have cancelled instance refresh if not in check mode.')
+ module.exit_json(changed=True, msg="Would have cancelled instance refresh if not in check mode.")
result = cmd_invocations[asg_state](aws_retry=True, **args)
- instance_refreshes = conn.describe_instance_refreshes(AutoScalingGroupName=asg_name, InstanceRefreshIds=[result['InstanceRefreshId']])
- result = dict(
- instance_refreshes=camel_dict_to_snake_dict(instance_refreshes['InstanceRefreshes'][0])
+ instance_refreshes = conn.describe_instance_refreshes(
+ AutoScalingGroupName=asg_name, InstanceRefreshIds=[result["InstanceRefreshId"]]
)
+ result = dict(instance_refreshes=camel_dict_to_snake_dict(instance_refreshes["InstanceRefreshes"][0]))
return module.exit_json(**result)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(
- e,
- msg='Failed to {0} InstanceRefresh'.format(
- asg_state.replace('ed', '')
- )
- )
+ module.fail_json_aws(e, msg=f"Failed to {asg_state.replace('ed', '')} InstanceRefresh")
def main():
-
argument_spec = dict(
state=dict(
- type='str',
+ type="str",
required=True,
- choices=['started', 'cancelled'],
+ choices=["started", "cancelled"],
),
name=dict(required=True),
- strategy=dict(
- type='str',
- default='Rolling',
- required=False
- ),
+ strategy=dict(type="str", default="Rolling", required=False),
preferences=dict(
- type='dict',
+ type="dict",
required=False,
options=dict(
- min_healthy_percentage=dict(type='int', default=90),
- instance_warmup=dict(type='int'),
- )
+ min_healthy_percentage=dict(type="int", default=90),
+ instance_warmup=dict(type="int"),
+ ),
),
)
@@ -256,15 +255,12 @@ def main():
supports_check_mode=True,
)
autoscaling = module.client(
- 'autoscaling',
- retry_decorator=AWSRetry.jittered_backoff(
- retries=10,
- catch_extra_error_codes=['InstanceRefreshInProgress']
- )
+ "autoscaling",
+ retry_decorator=AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=["InstanceRefreshInProgress"]),
)
start_or_cancel_instance_refresh(autoscaling, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
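A short sketch of the preferences transformation above: unset suboptions are scrubbed, then keys are CamelCased the way start_instance_refresh expects:

from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters

preferences = {"min_healthy_percentage": 91, "instance_warmup": None}
prefs = snake_dict_to_camel_dict(scrub_none_parameters(preferences), capitalize_first=True)
print(prefs)  # {'MinHealthyPercentage': 91}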
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py
index 3037d0b52..639940b1b 100644
--- a/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_instance_refresh_info.py
@@ -1,14 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: autoscaling_instance_refresh_info
version_added: 3.2.0
@@ -18,7 +14,8 @@ description:
- You can determine the status of a request by looking at the I(status) parameter.
- Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_instance_refresh_info).
The usage did not change.
-author: "Dan Khersonsky (@danquixote)"
+author:
+ - "Dan Khersonsky (@danquixote)"
options:
name:
description:
@@ -41,12 +38,12 @@ options:
type: int
required: false
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
 - name: Find a refresh by ASG name
@@ -70,9 +67,9 @@ EXAMPLES = '''
name: somename-asg
next_token: 'some-token-123'
register: asgs
-'''
+"""
-RETURN = '''
+RETURN = r"""
---
instance_refresh_id:
description: instance refresh id
@@ -120,16 +117,19 @@ instances_to_update:
returned: success
type: int
sample: 5
-'''
+"""
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def find_asg_instance_refreshes(conn, module):
@@ -158,51 +158,51 @@ def find_asg_instance_refreshes(conn, module):
],
'next_token': 'string'
}
- """
+ """
- asg_name = module.params.get('name')
- asg_ids = module.params.get('ids')
- asg_next_token = module.params.get('next_token')
- asg_max_records = module.params.get('max_records')
+ asg_name = module.params.get("name")
+ asg_ids = module.params.get("ids")
+ asg_next_token = module.params.get("next_token")
+ asg_max_records = module.params.get("max_records")
args = {}
- args['AutoScalingGroupName'] = asg_name
+ args["AutoScalingGroupName"] = asg_name
if asg_ids:
- args['InstanceRefreshIds'] = asg_ids
+ args["InstanceRefreshIds"] = asg_ids
if asg_next_token:
- args['NextToken'] = asg_next_token
+ args["NextToken"] = asg_next_token
if asg_max_records:
- args['MaxRecords'] = asg_max_records
+ args["MaxRecords"] = asg_max_records
try:
instance_refreshes_result = {}
response = conn.describe_instance_refreshes(**args)
- if 'InstanceRefreshes' in response:
+ if "InstanceRefreshes" in response:
instance_refreshes_dict = dict(
- instance_refreshes=response['InstanceRefreshes'], next_token=response.get('next_token', ''))
- instance_refreshes_result = camel_dict_to_snake_dict(
- instance_refreshes_dict)
+ instance_refreshes=response["InstanceRefreshes"], next_token=response.get("next_token", "")
+ )
+ instance_refreshes_result = camel_dict_to_snake_dict(instance_refreshes_dict)
- while 'NextToken' in response:
- args['NextToken'] = response['NextToken']
+ while "NextToken" in response:
+ args["NextToken"] = response["NextToken"]
response = conn.describe_instance_refreshes(**args)
- if 'InstanceRefreshes' in response:
- instance_refreshes_dict = camel_dict_to_snake_dict(dict(
- instance_refreshes=response['InstanceRefreshes'], next_token=response.get('next_token', '')))
+ if "InstanceRefreshes" in response:
+ instance_refreshes_dict = camel_dict_to_snake_dict(
+ dict(instance_refreshes=response["InstanceRefreshes"], next_token=response.get("next_token", ""))
+ )
instance_refreshes_result.update(instance_refreshes_dict)
return module.exit_json(**instance_refreshes_result)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to describe InstanceRefreshes')
+ module.fail_json_aws(e, msg="Failed to describe InstanceRefreshes")
def main():
-
argument_spec = dict(
- name=dict(required=True, type='str'),
- ids=dict(required=False, default=[], elements='str', type='list'),
- next_token=dict(required=False, default=None, type='str', no_log=True),
- max_records=dict(required=False, type='int'),
+ name=dict(required=True, type="str"),
+ ids=dict(required=False, default=[], elements="str", type="list"),
+ next_token=dict(required=False, default=None, type="str", no_log=True),
+ max_records=dict(required=False, type="int"),
)
module = AnsibleAWSModule(
@@ -210,12 +210,9 @@ def main():
supports_check_mode=True,
)
- autoscaling = module.client(
- 'autoscaling',
- retry_decorator=AWSRetry.jittered_backoff(retries=10)
- )
+ autoscaling = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff(retries=10))
find_asg_instance_refreshes(autoscaling, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
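The pagination loop above follows NextToken until the service stops returning one; stripped of the result massaging, it reduces to:

def describe_all_instance_refreshes(conn, asg_name):
    args = {"AutoScalingGroupName": asg_name}
    refreshes = []
    while True:
        response = conn.describe_instance_refreshes(**args)
        refreshes.extend(response.get("InstanceRefreshes", []))
        if "NextToken" not in response:
            return refreshes
        args["NextToken"] = response["NextToken"]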
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py
index 1b13d1027..78b7ee233 100644
--- a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
-# This file is part of Ansible
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+# -*- coding: utf-8 -*-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: autoscaling_launch_config
version_added: 1.0.0
@@ -183,80 +180,86 @@ options:
type: str
choices: ['default', 'dedicated']
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: create a launch configuration with an encrypted volume
community.aws.autoscaling_launch_config:
name: special
image_id: ami-XXX
key_name: default
- security_groups: ['group', 'group2' ]
+ security_groups:
+ - 'group'
+ - 'group2'
instance_type: t1.micro
volumes:
- - device_name: /dev/sda1
- volume_size: 100
- volume_type: io1
- iops: 3000
- delete_on_termination: true
- encrypted: true
- - device_name: /dev/sdb
- ephemeral: ephemeral0
+ - device_name: /dev/sda1
+ volume_size: 100
+ volume_type: io1
+ iops: 3000
+ delete_on_termination: true
+ encrypted: true
+ - device_name: /dev/sdb
+ ephemeral: ephemeral0
- name: create a launch configuration using a running instance id as a basis
community.aws.autoscaling_launch_config:
name: special
instance_id: i-00a48b207ec59e948
key_name: default
- security_groups: ['launch-wizard-2' ]
+ security_groups:
+ - 'launch-wizard-2'
volumes:
- - device_name: /dev/sda1
- volume_size: 120
- volume_type: io1
- iops: 3000
- delete_on_termination: true
+ - device_name: /dev/sda1
+ volume_size: 120
+ volume_type: io1
+ iops: 3000
+ delete_on_termination: true
- name: create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image
community.aws.autoscaling_launch_config:
name: special
image_id: ami-XXX
key_name: default
- security_groups: ['group', 'group2' ]
+ security_groups:
+ - 'group'
+ - 'group2'
instance_type: t1.micro
volumes:
- - device_name: /dev/sdf
- no_device: true
+ - device_name: /dev/sdf
+ no_device: true
- name: Use EBS snapshot ID for volume
block:
- - name: Set Volume Facts
- ansible.builtin.set_fact:
- volumes:
- - device_name: /dev/sda1
- volume_size: 20
- ebs:
- snapshot: snap-XXXX
- volume_type: gp2
- delete_on_termination: true
- encrypted: false
-
- - name: Create launch configuration
- community.aws.autoscaling_launch_config:
- name: lc1
- image_id: ami-xxxx
- assign_public_ip: true
- instance_type: t2.medium
- key_name: my-key
- security_groups: "['sg-xxxx']"
- volumes: "{{ volumes }}"
- register: lc_info
-'''
-
-RETURN = r'''
+ - name: Set Volume Facts
+ ansible.builtin.set_fact:
+ volumes:
+ - device_name: /dev/sda1
+ volume_size: 20
+ ebs:
+ snapshot: snap-XXXX
+ volume_type: gp2
+ delete_on_termination: true
+ encrypted: false
+
+ - name: Create launch configuration
+ community.aws.autoscaling_launch_config:
+ name: lc1
+ image_id: ami-xxxx
+ assign_public_ip: true
+ instance_type: t2.medium
+ key_name: my-key
+ security_groups:
+ - 'sg-xxxx'
+ volumes: "{{ volumes }}"
+ register: lc_info
+"""
+
+RETURN = r"""
arn:
description: The Amazon Resource Name of the launch configuration.
returned: when I(state=present)
@@ -440,7 +443,7 @@ security_groups:
type: list
sample:
- sg-5e27db2f
-'''
+"""
import traceback
@@ -454,181 +457,220 @@ from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def create_block_device_meta(module, volume):
- if 'snapshot' not in volume and 'ephemeral' not in volume and 'no_device' not in volume:
- if 'volume_size' not in volume:
- module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
- if 'snapshot' in volume:
- if volume.get('volume_type') == 'io1' and 'iops' not in volume:
- module.fail_json(msg='io1 volumes must have an iops value set')
- if 'ephemeral' in volume:
- if 'snapshot' in volume:
- module.fail_json(msg='Cannot set both ephemeral and snapshot')
+ if "snapshot" not in volume and "ephemeral" not in volume and "no_device" not in volume:
+ if "volume_size" not in volume:
+ module.fail_json(msg="Size must be specified when creating a new volume or modifying the root volume")
+ if "snapshot" in volume:
+ if volume.get("volume_type") == "io1" and "iops" not in volume:
+ module.fail_json(msg="io1 volumes must have an iops value set")
+ if "ephemeral" in volume:
+ if "snapshot" in volume:
+ module.fail_json(msg="Cannot set both ephemeral and snapshot")
return_object = {}
- if 'ephemeral' in volume:
- return_object['VirtualName'] = volume.get('ephemeral')
+ if "ephemeral" in volume:
+ return_object["VirtualName"] = volume.get("ephemeral")
- if 'device_name' in volume:
- return_object['DeviceName'] = volume.get('device_name')
+ if "device_name" in volume:
+ return_object["DeviceName"] = volume.get("device_name")
- if 'no_device' in volume:
- return_object['NoDevice'] = volume.get('no_device')
+ if "no_device" in volume:
+ return_object["NoDevice"] = volume.get("no_device")
- if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'throughput', 'encrypted']):
- return_object['Ebs'] = {}
+ if any(
+ key in volume
+ for key in [
+ "snapshot",
+ "volume_size",
+ "volume_type",
+ "delete_on_termination",
+ "iops",
+ "throughput",
+ "encrypted",
+ ]
+ ):
+ return_object["Ebs"] = {}
- if 'snapshot' in volume:
- return_object['Ebs']['SnapshotId'] = volume.get('snapshot')
+ if "snapshot" in volume:
+ return_object["Ebs"]["SnapshotId"] = volume.get("snapshot")
- if 'volume_size' in volume:
- return_object['Ebs']['VolumeSize'] = int(volume.get('volume_size', 0))
+ if "volume_size" in volume:
+ return_object["Ebs"]["VolumeSize"] = int(volume.get("volume_size", 0))
- if 'volume_type' in volume:
- return_object['Ebs']['VolumeType'] = volume.get('volume_type')
+ if "volume_type" in volume:
+ return_object["Ebs"]["VolumeType"] = volume.get("volume_type")
- if 'delete_on_termination' in volume:
- return_object['Ebs']['DeleteOnTermination'] = volume.get('delete_on_termination', False)
+ if "delete_on_termination" in volume:
+ return_object["Ebs"]["DeleteOnTermination"] = volume.get("delete_on_termination", False)
- if 'iops' in volume:
- return_object['Ebs']['Iops'] = volume.get('iops')
+ if "iops" in volume:
+ return_object["Ebs"]["Iops"] = volume.get("iops")
- if 'throughput' in volume:
- if volume.get('volume_type') != 'gp3':
- module.fail_json(msg='The throughput parameter is supported only for GP3 volumes.')
- return_object['Ebs']['Throughput'] = volume.get('throughput')
+ if "throughput" in volume:
+ if volume.get("volume_type") != "gp3":
+ module.fail_json(msg="The throughput parameter is supported only for GP3 volumes.")
+ return_object["Ebs"]["Throughput"] = volume.get("throughput")
- if 'encrypted' in volume:
- return_object['Ebs']['Encrypted'] = volume.get('encrypted')
+ if "encrypted" in volume:
+ return_object["Ebs"]["Encrypted"] = volume.get("encrypted")
return return_object
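
For illustration, a hedged input/output pair for create_block_device_meta, using the encrypted-volume shape from the EXAMPLES block above; the return value is inferred from the code, not captured from a run.

    volume = {
        "device_name": "/dev/sda1",
        "volume_size": 100,
        "volume_type": "io1",
        "iops": 3000,
        "delete_on_termination": True,
        "encrypted": True,
    }
    # create_block_device_meta(module, volume) would return roughly:
    # {"DeviceName": "/dev/sda1",
    #  "Ebs": {"VolumeSize": 100, "VolumeType": "io1", "Iops": 3000,
    #          "DeleteOnTermination": True, "Encrypted": True}}
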
def create_launch_config(connection, module):
- name = module.params.get('name')
- vpc_id = module.params.get('vpc_id')
+ name = module.params.get("name")
+ vpc_id = module.params.get("vpc_id")
try:
- ec2_connection = module.client('ec2')
+ ec2_connection = module.client("ec2")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
try:
- security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True)
+ security_groups = get_ec2_security_group_ids_from_names(
+ module.params.get("security_groups"), ec2_connection, vpc_id=vpc_id, boto3=True
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to get Security Group IDs')
+ module.fail_json_aws(e, msg="Failed to get Security Group IDs")
except ValueError as e:
module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc())
- user_data = module.params.get('user_data')
- user_data_path = module.params.get('user_data_path')
- volumes = module.params['volumes']
- instance_monitoring = module.params.get('instance_monitoring')
- assign_public_ip = module.params.get('assign_public_ip')
- instance_profile_name = module.params.get('instance_profile_name')
- ebs_optimized = module.params.get('ebs_optimized')
- classic_link_vpc_id = module.params.get('classic_link_vpc_id')
- classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
+ user_data = module.params.get("user_data")
+ user_data_path = module.params.get("user_data_path")
+ volumes = module.params["volumes"]
+ instance_monitoring = module.params.get("instance_monitoring")
+ assign_public_ip = module.params.get("assign_public_ip")
+ instance_profile_name = module.params.get("instance_profile_name")
+ ebs_optimized = module.params.get("ebs_optimized")
+ classic_link_vpc_id = module.params.get("classic_link_vpc_id")
+ classic_link_vpc_security_groups = module.params.get("classic_link_vpc_security_groups")
block_device_mapping = []
- convert_list = ['image_id', 'instance_type', 'instance_type', 'instance_id', 'placement_tenancy', 'key_name', 'kernel_id', 'ramdisk_id', 'spot_price']
-
- launch_config = (snake_dict_to_camel_dict(dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list)))
+ convert_list = [
+ "image_id",
+ "instance_type",
+ "instance_id",
+ "placement_tenancy",
+ "key_name",
+ "kernel_id",
+ "ramdisk_id",
+ "spot_price",
+ ]
+
+ launch_config = snake_dict_to_camel_dict(
+ dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list)
+ )
if user_data_path:
try:
- with open(user_data_path, 'r') as user_data_file:
+ with open(user_data_path, "r") as user_data_file:
user_data = user_data_file.read()
except IOError as e:
module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc())
if volumes:
for volume in volumes:
- if 'device_name' not in volume:
- module.fail_json(msg='Device name must be set for volume')
+ if "device_name" not in volume:
+ module.fail_json(msg="Device name must be set for volume")
             # Minimum volume size is 1GiB. A volume_size explicitly set to 0 signals that this volume should not be created
- if 'volume_size' not in volume or int(volume['volume_size']) > 0:
+ if "volume_size" not in volume or int(volume["volume_size"]) > 0:
block_device_mapping.append(create_block_device_meta(module, volume))
try:
- launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+ launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get(
+ "LaunchConfigurations"
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe launch configuration by name")
changed = False
result = {}
- launch_config['LaunchConfigurationName'] = name
+ launch_config["LaunchConfigurationName"] = name
if security_groups is not None:
- launch_config['SecurityGroups'] = security_groups
+ launch_config["SecurityGroups"] = security_groups
if classic_link_vpc_id is not None:
- launch_config['ClassicLinkVPCId'] = classic_link_vpc_id
+ launch_config["ClassicLinkVPCId"] = classic_link_vpc_id
if instance_monitoring is not None:
- launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring}
+ launch_config["InstanceMonitoring"] = {"Enabled": instance_monitoring}
if classic_link_vpc_security_groups is not None:
- launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups
+ launch_config["ClassicLinkVPCSecurityGroups"] = classic_link_vpc_security_groups
if block_device_mapping:
- launch_config['BlockDeviceMappings'] = block_device_mapping
+ launch_config["BlockDeviceMappings"] = block_device_mapping
if instance_profile_name is not None:
- launch_config['IamInstanceProfile'] = instance_profile_name
+ launch_config["IamInstanceProfile"] = instance_profile_name
if assign_public_ip is not None:
- launch_config['AssociatePublicIpAddress'] = assign_public_ip
+ launch_config["AssociatePublicIpAddress"] = assign_public_ip
if user_data is not None:
- launch_config['UserData'] = user_data
+ launch_config["UserData"] = user_data
if ebs_optimized is not None:
- launch_config['EbsOptimized'] = ebs_optimized
+ launch_config["EbsOptimized"] = ebs_optimized
if len(launch_configs) == 0:
try:
connection.create_launch_configuration(**launch_config)
- launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+ launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get(
+ "LaunchConfigurations"
+ )
changed = True
if launch_configs:
launch_config = launch_configs[0]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to create launch configuration")
- result = (dict((k, v) for k, v in launch_config.items()
- if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings']))
+ result = dict(
+ (k, v)
+ for k, v in launch_config.items()
+ if k not in ["Connection", "CreatedTime", "InstanceMonitoring", "BlockDeviceMappings"]
+ )
- result['CreatedTime'] = to_text(launch_config.get('CreatedTime'))
+ result["CreatedTime"] = to_text(launch_config.get("CreatedTime"))
try:
- result['InstanceMonitoring'] = module.boolean(launch_config.get('InstanceMonitoring').get('Enabled'))
+ result["InstanceMonitoring"] = module.boolean(launch_config.get("InstanceMonitoring").get("Enabled"))
except AttributeError:
- result['InstanceMonitoring'] = False
-
- result['BlockDeviceMappings'] = []
-
- for block_device_mapping in launch_config.get('BlockDeviceMappings', []):
- result['BlockDeviceMappings'].append(dict(device_name=block_device_mapping.get('DeviceName'), virtual_name=block_device_mapping.get('VirtualName')))
- if block_device_mapping.get('Ebs') is not None:
- result['BlockDeviceMappings'][-1]['ebs'] = dict(
- snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'), volume_size=block_device_mapping.get('Ebs').get('VolumeSize'))
+ result["InstanceMonitoring"] = False
+
+ result["BlockDeviceMappings"] = []
+
+ for block_device_mapping in launch_config.get("BlockDeviceMappings", []):
+ result["BlockDeviceMappings"].append(
+ dict(
+ device_name=block_device_mapping.get("DeviceName"), virtual_name=block_device_mapping.get("VirtualName")
+ )
+ )
+ if block_device_mapping.get("Ebs") is not None:
+ result["BlockDeviceMappings"][-1]["ebs"] = dict(
+ snapshot_id=block_device_mapping.get("Ebs").get("SnapshotId"),
+ volume_size=block_device_mapping.get("Ebs").get("VolumeSize"),
+ )
if user_data_path:
- result['UserData'] = "hidden" # Otherwise, we dump binary to the user's terminal
+ result["UserData"] = "hidden" # Otherwise, we dump binary to the user's terminal
return_object = {
- 'Name': result.get('LaunchConfigurationName'),
- 'CreatedTime': result.get('CreatedTime'),
- 'ImageId': result.get('ImageId'),
- 'Arn': result.get('LaunchConfigurationARN'),
- 'SecurityGroups': result.get('SecurityGroups'),
- 'InstanceType': result.get('InstanceType'),
- 'Result': result
+ "Name": result.get("LaunchConfigurationName"),
+ "CreatedTime": result.get("CreatedTime"),
+ "ImageId": result.get("ImageId"),
+ "Arn": result.get("LaunchConfigurationARN"),
+ "SecurityGroups": result.get("SecurityGroups"),
+ "InstanceType": result.get("InstanceType"),
+ "Result": result,
}
module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object))
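
The final exit_json passes the result through camel_dict_to_snake_dict, which converts boto3-style keys into the snake_case keys Ansible returns. A tiny sketch of that transformation (values are placeholders; requires ansible-core to be installed):

    from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

    camel_dict_to_snake_dict({"LaunchConfigurationName": "special", "ImageId": "ami-XXX"})
    # -> {"launch_configuration_name": "special", "image_id": "ami-XXX"}
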
@@ -636,10 +678,14 @@ def create_launch_config(connection, module):
def delete_launch_config(connection, module):
try:
- name = module.params.get('name')
- launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
+ name = module.params.get("name")
+ launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get(
+ "LaunchConfigurations"
+ )
if launch_configs:
- connection.delete_launch_configuration(LaunchConfigurationName=launch_configs[0].get('LaunchConfigurationName'))
+ connection.delete_launch_configuration(
+ LaunchConfigurationName=launch_configs[0].get("LaunchConfigurationName")
+ )
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
@@ -653,42 +699,42 @@ def main():
image_id=dict(),
instance_id=dict(),
key_name=dict(),
- security_groups=dict(default=[], type='list', elements='str'),
+ security_groups=dict(default=[], type="list", elements="str"),
user_data=dict(),
- user_data_path=dict(type='path'),
+ user_data_path=dict(type="path"),
kernel_id=dict(),
- volumes=dict(type='list', elements='dict'),
+ volumes=dict(type="list", elements="dict"),
instance_type=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- spot_price=dict(type='float'),
+ state=dict(default="present", choices=["present", "absent"]),
+ spot_price=dict(type="float"),
ramdisk_id=dict(),
instance_profile_name=dict(),
- ebs_optimized=dict(default=False, type='bool'),
- instance_monitoring=dict(default=False, type='bool'),
- assign_public_ip=dict(type='bool'),
- classic_link_vpc_security_groups=dict(type='list', elements='str'),
+ ebs_optimized=dict(default=False, type="bool"),
+ instance_monitoring=dict(default=False, type="bool"),
+ assign_public_ip=dict(type="bool"),
+ classic_link_vpc_security_groups=dict(type="list", elements="str"),
classic_link_vpc_id=dict(),
vpc_id=dict(),
- placement_tenancy=dict(choices=['default', 'dedicated'])
+ placement_tenancy=dict(choices=["default", "dedicated"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- mutually_exclusive=[['user_data', 'user_data_path']],
+ mutually_exclusive=[["user_data", "user_data_path"]],
)
try:
- connection = module.client('autoscaling')
+ connection = module.client("autoscaling")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="unable to establish connection")
- state = module.params.get('state')
+ state = module.params.get("state")
- if state == 'present':
+ if state == "present":
create_launch_config(connection, module)
- elif state == 'absent':
+ elif state == "absent":
delete_launch_config(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
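
As a hedged sketch of the request create_launch_config() ultimately assembles, the equivalent direct boto3 call would look roughly like the following. All values are placeholders taken from the EXAMPLES block, and security group names are assumed to have been resolved to IDs beforehand.

    import boto3

    client = boto3.client("autoscaling")
    client.create_launch_configuration(
        LaunchConfigurationName="special",
        ImageId="ami-XXX",
        InstanceType="t1.micro",
        SecurityGroups=["sg-xxxx"],
        BlockDeviceMappings=[
            {
                "DeviceName": "/dev/sda1",
                "Ebs": {"VolumeSize": 100, "VolumeType": "io1", "Iops": 3000,
                        "DeleteOnTermination": True, "Encrypted": True},
            }
        ],
    )
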
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py
index ae8f187c0..037c21ed9 100644
--- a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_find.py
@@ -1,14 +1,10 @@
#!/usr/bin/python
-# encoding: utf-8
+# -*- coding: utf-8 -*-
# (c) 2015, Jose Armesto <jose@armesto.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: autoscaling_launch_config_find
version_added: 1.0.0
@@ -40,12 +36,12 @@ options:
- Corresponds to Python slice notation like list[:limit].
type: int
extends_documentation_fragment:
- - amazon.aws.ec2
- - amazon.aws.aws
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Search for the Launch Configurations that start with "app"
@@ -53,9 +49,9 @@ EXAMPLES = '''
name_regex: app.*
sort_order: descending
limit: 2
-'''
+"""
-RETURN = '''
+RETURN = r"""
image_id:
description: AMI id
returned: when Launch Configuration was found
@@ -132,7 +128,8 @@ associate_public_address:
type: bool
sample: True
...
-'''
+"""
+
import re
try:
@@ -140,54 +137,50 @@ try:
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def find_launch_configs(client, module):
- name_regex = module.params.get('name_regex')
- sort_order = module.params.get('sort_order')
- limit = module.params.get('limit')
+ name_regex = module.params.get("name_regex")
+ sort_order = module.params.get("sort_order")
+ limit = module.params.get("limit")
- paginator = client.get_paginator('describe_launch_configurations')
+ paginator = client.get_paginator("describe_launch_configurations")
- response_iterator = paginator.paginate(
- PaginationConfig={
- 'MaxItems': 1000,
- 'PageSize': 100
- }
- )
+ response_iterator = paginator.paginate(PaginationConfig={"MaxItems": 1000, "PageSize": 100})
results = []
for response in response_iterator:
- response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),
- response['LaunchConfigurations'])
+ response["LaunchConfigurations"] = filter(
+ lambda lc: re.compile(name_regex).match(lc["LaunchConfigurationName"]), response["LaunchConfigurations"]
+ )
- for lc in response['LaunchConfigurations']:
+ for lc in response["LaunchConfigurations"]:
data = {
- 'name': lc['LaunchConfigurationName'],
- 'arn': lc['LaunchConfigurationARN'],
- 'created_time': lc['CreatedTime'],
- 'user_data': lc['UserData'],
- 'instance_type': lc['InstanceType'],
- 'image_id': lc['ImageId'],
- 'ebs_optimized': lc['EbsOptimized'],
- 'instance_monitoring': lc['InstanceMonitoring'],
- 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
- 'block_device_mappings': lc['BlockDeviceMappings'],
- 'keyname': lc['KeyName'],
- 'security_groups': lc['SecurityGroups'],
- 'kernel_id': lc['KernelId'],
- 'ram_disk_id': lc['RamdiskId'],
- 'associate_public_address': lc.get('AssociatePublicIpAddress', False),
+ "name": lc["LaunchConfigurationName"],
+ "arn": lc["LaunchConfigurationARN"],
+ "created_time": lc["CreatedTime"],
+ "user_data": lc["UserData"],
+ "instance_type": lc["InstanceType"],
+ "image_id": lc["ImageId"],
+ "ebs_optimized": lc["EbsOptimized"],
+ "instance_monitoring": lc["InstanceMonitoring"],
+ "classic_link_vpc_security_groups": lc["ClassicLinkVPCSecurityGroups"],
+ "block_device_mappings": lc["BlockDeviceMappings"],
+ "keyname": lc["KeyName"],
+ "security_groups": lc["SecurityGroups"],
+ "kernel_id": lc["KernelId"],
+ "ram_disk_id": lc["RamdiskId"],
+ "associate_public_address": lc.get("AssociatePublicIpAddress", False),
}
results.append(data)
- results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))
+ results.sort(key=lambda e: e["name"], reverse=(sort_order == "descending"))
if limit:
- results = results[:int(limit)]
+ results = results[:int(limit)] # fmt: skip
module.exit_json(changed=False, results=results)
@@ -195,8 +188,8 @@ def find_launch_configs(client, module):
def main():
argument_spec = dict(
name_regex=dict(required=True),
- sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
- limit=dict(required=False, type='int'),
+ sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]),
+ limit=dict(required=False, type="int"),
)
module = AnsibleAWSModule(
@@ -204,12 +197,12 @@ def main():
)
try:
- client = module.client('autoscaling')
+ client = module.client("autoscaling")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
find_launch_configs(client, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
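
A self-contained sketch of the name_regex / sort_order / limit behaviour in find_launch_configs, run on plain data (the names are made up):

    import re

    configs = [{"name": "app-a"}, {"name": "app-b"}, {"name": "db-a"}]
    pattern = re.compile("app.*")
    results = [c for c in configs if pattern.match(c["name"])]
    results.sort(key=lambda e: e["name"], reverse=True)  # sort_order: descending
    print(results[:2])  # limit: 2 -> [{'name': 'app-b'}, {'name': 'app-a'}]
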
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py
index 1c98d7588..f5123c2ef 100644
--- a/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_launch_config_info.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: autoscaling_launch_config_info
version_added: 1.0.0
@@ -48,12 +45,12 @@ options:
- Corresponds to Python slice notation.
type: int
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all launch configurations
@@ -67,9 +64,9 @@ EXAMPLES = r'''
community.aws.autoscaling_launch_config_info:
sort: created_time
sort_order: descending
-'''
+"""
-RETURN = r'''
+RETURN = r"""
block_device_mapping:
description: Block device mapping for the instances of launch configuration.
type: list
@@ -149,43 +146,41 @@ user_data:
description: User data available.
type: str
returned: always
-'''
+"""
try:
import botocore
- from botocore.exceptions import ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def list_launch_configs(connection, module):
-
launch_config_name = module.params.get("name")
- sort = module.params.get('sort')
- sort_order = module.params.get('sort_order')
- sort_start = module.params.get('sort_start')
- sort_end = module.params.get('sort_end')
+ sort = module.params.get("sort")
+ sort_order = module.params.get("sort_order")
+ sort_start = module.params.get("sort_start")
+ sort_end = module.params.get("sort_end")
try:
- pg = connection.get_paginator('describe_launch_configurations')
+ pg = connection.get_paginator("describe_launch_configurations")
launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result()
- except ClientError as e:
+ except botocore.exceptions.ClientError as e:
module.fail_json_aws(e, msg="Failed to list launch configs")
snaked_launch_configs = []
- for launch_config in launch_configs['LaunchConfigurations']:
+ for launch_config in launch_configs["LaunchConfigurations"]:
snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))
for launch_config in snaked_launch_configs:
- if 'CreatedTime' in launch_config:
- launch_config['CreatedTime'] = str(launch_config['CreatedTime'])
+ if "CreatedTime" in launch_config:
+ launch_config["CreatedTime"] = str(launch_config["CreatedTime"])
if sort:
- snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
+ snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == "descending"))
if sort and sort_start and sort_end:
snaked_launch_configs = snaked_launch_configs[sort_start:sort_end]
@@ -199,13 +194,23 @@ def list_launch_configs(connection, module):
def main():
argument_spec = dict(
- name=dict(required=False, default=[], type='list', elements='str'),
- sort=dict(required=False, default=None,
- choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
- sort_order=dict(required=False, default='ascending',
- choices=['ascending', 'descending']),
- sort_start=dict(required=False, type='int'),
- sort_end=dict(required=False, type='int'),
+ name=dict(required=False, default=[], type="list", elements="str"),
+ sort=dict(
+ required=False,
+ default=None,
+ choices=[
+ "launch_configuration_name",
+ "image_id",
+ "created_time",
+ "instance_type",
+ "kernel_id",
+ "ramdisk_id",
+ "key_name",
+ ],
+ ),
+ sort_order=dict(required=False, default="ascending", choices=["ascending", "descending"]),
+ sort_start=dict(required=False, type="int"),
+ sort_end=dict(required=False, type="int"),
)
module = AnsibleAWSModule(
@@ -214,12 +219,12 @@ def main():
)
try:
- connection = module.client('autoscaling')
+ connection = module.client("autoscaling")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
list_launch_configs(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
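
A minimal sketch of the paginator pattern used in list_launch_configs: build_full_result() aggregates every page into one response-shaped dict. This assumes AWS credentials are configured; passing an empty name list returns all launch configurations.

    import boto3

    client = boto3.client("autoscaling")
    paginator = client.get_paginator("describe_launch_configurations")
    full = paginator.paginate(LaunchConfigurationNames=[]).build_full_result()
    for lc in full["LaunchConfigurations"]:
        print(lc["LaunchConfigurationName"], str(lc["CreatedTime"]))
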
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py b/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py
index cf07b7681..a77fcce0a 100644
--- a/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_lifecycle_hook.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: autoscaling_lifecycle_hook
version_added: 1.0.0
@@ -74,12 +71,12 @@ options:
default: ABANDON
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create / Update lifecycle hook
community.aws.autoscaling_lifecycle_hook:
region: eu-central-1
@@ -96,9 +93,9 @@ EXAMPLES = '''
state: absent
autoscaling_group_name: example
lifecycle_hook_name: example
-'''
+"""
-RETURN = '''
+RETURN = r"""
---
auto_scaling_group_name:
description: The unique name of the auto scaling group.
@@ -130,7 +127,7 @@ lifecycle_transition:
returned: success
type: str
sample: "autoscaling:EC2_INSTANCE_LAUNCHING"
-'''
+"""
try:
@@ -138,61 +135,64 @@ try:
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-def create_lifecycle_hook(connection, module):
- lch_name = module.params.get('lifecycle_hook_name')
- asg_name = module.params.get('autoscaling_group_name')
- transition = module.params.get('transition')
- role_arn = module.params.get('role_arn')
- notification_target_arn = module.params.get('notification_target_arn')
- notification_meta_data = module.params.get('notification_meta_data')
- heartbeat_timeout = module.params.get('heartbeat_timeout')
- default_result = module.params.get('default_result')
+def create_lifecycle_hook(connection, module):
+ lch_name = module.params.get("lifecycle_hook_name")
+ asg_name = module.params.get("autoscaling_group_name")
+ transition = module.params.get("transition")
+ role_arn = module.params.get("role_arn")
+ notification_target_arn = module.params.get("notification_target_arn")
+ notification_meta_data = module.params.get("notification_meta_data")
+ heartbeat_timeout = module.params.get("heartbeat_timeout")
+ default_result = module.params.get("default_result")
return_object = {}
- return_object['changed'] = False
+ return_object["changed"] = False
lch_params = {
- 'LifecycleHookName': lch_name,
- 'AutoScalingGroupName': asg_name,
- 'LifecycleTransition': transition
+ "LifecycleHookName": lch_name,
+ "AutoScalingGroupName": asg_name,
+ "LifecycleTransition": transition,
}
if role_arn:
- lch_params['RoleARN'] = role_arn
+ lch_params["RoleARN"] = role_arn
if notification_target_arn:
- lch_params['NotificationTargetARN'] = notification_target_arn
+ lch_params["NotificationTargetARN"] = notification_target_arn
if notification_meta_data:
- lch_params['NotificationMetadata'] = notification_meta_data
+ lch_params["NotificationMetadata"] = notification_meta_data
if heartbeat_timeout:
- lch_params['HeartbeatTimeout'] = heartbeat_timeout
+ lch_params["HeartbeatTimeout"] = heartbeat_timeout
if default_result:
- lch_params['DefaultResult'] = default_result
+ lch_params["DefaultResult"] = default_result
try:
existing_hook = connection.describe_lifecycle_hooks(
AutoScalingGroupName=asg_name,
- LifecycleHookNames=[lch_name]
- )['LifecycleHooks']
+ LifecycleHookNames=[lch_name],
+ )["LifecycleHooks"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get Lifecycle Hook")
if not existing_hook:
try:
if module.check_mode:
- module.exit_json(changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode.")
- return_object['changed'] = True
+ module.exit_json(
+ changed=True, msg="Would have created AutoScalingGroup Lifecycle Hook if not in check_mode."
+ )
+ return_object["changed"] = True
connection.put_lifecycle_hook(**lch_params)
- return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks(
- AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks']
+ return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks(
+ AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name]
+ )["LifecycleHooks"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to create LifecycleHook")
@@ -201,11 +201,14 @@ def create_lifecycle_hook(connection, module):
if modified:
try:
if module.check_mode:
- module.exit_json(changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode.")
- return_object['changed'] = True
+ module.exit_json(
+ changed=True, msg="Would have modified AutoScalingGroup Lifecycle Hook if not in check_mode."
+ )
+ return_object["changed"] = True
connection.put_lifecycle_hook(**lch_params)
- return_object['lifecycle_hook_info'] = connection.describe_lifecycle_hooks(
- AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name])['LifecycleHooks']
+ return_object["lifecycle_hook_info"] = connection.describe_lifecycle_hooks(
+ AutoScalingGroupName=asg_name, LifecycleHookNames=[lch_name]
+ )["LifecycleHooks"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to create LifecycleHook")
@@ -229,33 +232,37 @@ def dict_compare(d1, d2):
def delete_lifecycle_hook(connection, module):
-
- lch_name = module.params.get('lifecycle_hook_name')
- asg_name = module.params.get('autoscaling_group_name')
+ lch_name = module.params.get("lifecycle_hook_name")
+ asg_name = module.params.get("autoscaling_group_name")
return_object = {}
- return_object['changed'] = False
+ return_object["changed"] = False
try:
all_hooks = connection.describe_lifecycle_hooks(
- AutoScalingGroupName=asg_name
+ AutoScalingGroupName=asg_name,
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get Lifecycle Hooks")
- for hook in all_hooks['LifecycleHooks']:
- if hook['LifecycleHookName'] == lch_name:
+ for hook in all_hooks["LifecycleHooks"]:
+ if hook["LifecycleHookName"] == lch_name:
lch_params = {
- 'LifecycleHookName': lch_name,
- 'AutoScalingGroupName': asg_name
+ "LifecycleHookName": lch_name,
+ "AutoScalingGroupName": asg_name,
}
try:
if module.check_mode:
- module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode.")
+ module.exit_json(
+ changed=True, msg="Would have deleted AutoScalingGroup Lifecycle Hook if not in check_mode."
+ )
connection.delete_lifecycle_hook(**lch_params)
- return_object['changed'] = True
- return_object['lifecycle_hook_removed'] = {'LifecycleHookName': lch_name, 'AutoScalingGroupName': asg_name}
+ return_object["changed"] = True
+ return_object["lifecycle_hook_removed"] = {
+ "LifecycleHookName": lch_name,
+ "AutoScalingGroupName": asg_name,
+ }
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to delete LifecycleHook")
else:
@@ -266,34 +273,36 @@ def delete_lifecycle_hook(connection, module):
def main():
argument_spec = dict(
- autoscaling_group_name=dict(required=True, type='str'),
- lifecycle_hook_name=dict(required=True, type='str'),
- transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']),
- role_arn=dict(type='str'),
- notification_target_arn=dict(type='str'),
- notification_meta_data=dict(type='str'),
- heartbeat_timeout=dict(type='int'),
- default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']),
- state=dict(default='present', choices=['present', 'absent'])
+ autoscaling_group_name=dict(required=True, type="str"),
+ lifecycle_hook_name=dict(required=True, type="str"),
+ transition=dict(
+ type="str", choices=["autoscaling:EC2_INSTANCE_TERMINATING", "autoscaling:EC2_INSTANCE_LAUNCHING"]
+ ),
+ role_arn=dict(type="str"),
+ notification_target_arn=dict(type="str"),
+ notification_meta_data=dict(type="str"),
+ heartbeat_timeout=dict(type="int"),
+ default_result=dict(default="ABANDON", choices=["ABANDON", "CONTINUE"]),
+ state=dict(default="present", choices=["present", "absent"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
- required_if=[['state', 'present', ['transition']]],
+ required_if=[["state", "present", ["transition"]]],
)
- state = module.params.get('state')
+ state = module.params.get("state")
- connection = module.client('autoscaling')
+ connection = module.client("autoscaling")
changed = False
- if state == 'present':
+ if state == "present":
create_lifecycle_hook(connection, module)
- elif state == 'absent':
+ elif state == "absent":
delete_lifecycle_hook(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
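
For reference, the lch_params dict built in create_lifecycle_hook feeds connection.put_lifecycle_hook(). A hedged, direct-boto3 equivalent of the "Create / Update lifecycle hook" example above; the values are placeholders, not module output.

    import boto3

    client = boto3.client("autoscaling")
    client.put_lifecycle_hook(
        LifecycleHookName="example",
        AutoScalingGroupName="example",
        LifecycleTransition="autoscaling:EC2_INSTANCE_LAUNCHING",
        HeartbeatTimeout=7000,
        DefaultResult="ABANDON",
    )
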
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py b/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py
index a29389b0e..6d69d8492 100644
--- a/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_policy.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: autoscaling_policy
short_description: Create or delete AWS scaling policies for Autoscaling groups
version_added: 1.0.0
@@ -189,11 +187,12 @@ options:
description:
- The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics.
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
-EXAMPLES = '''
+"""
+
+EXAMPLES = r"""
- name: Simple Scale Down policy
community.aws.autoscaling_policy:
state: present
@@ -224,7 +223,7 @@ EXAMPLES = '''
asg_name: "application-asg"
- name: create TargetTracking predefined policy
- ec2_scaling_policy:
+ community.aws.autoscaling_policy:
name: "predefined-policy-1"
policy_type: TargetTrackingScaling
target_tracking_config:
@@ -235,7 +234,7 @@ EXAMPLES = '''
register: result
- name: create TargetTracking predefined policy with resource_label
- ec2_scaling_policy:
+ community.aws.autoscaling_policy:
name: "predefined-policy-1"
policy_type: TargetTrackingScaling
target_tracking_config:
@@ -247,7 +246,7 @@ EXAMPLES = '''
register: result
- name: create TargetTrackingScaling custom policy
- ec2_scaling_policy:
+ community.aws.autoscaling_policy:
name: "custom-policy-1"
policy_type: TargetTrackingScaling
target_tracking_config:
@@ -261,9 +260,9 @@ EXAMPLES = '''
target_value: 98.0
asg_name: asg-test-1
register: result
-'''
+"""
-RETURN = '''
+RETURN = r"""
adjustment_type:
description: Scaling policy adjustment type.
returned: always
@@ -349,137 +348,146 @@ step_adjustments:
returned: always
type: int
sample: 50
-'''
+"""
try:
import botocore
except ImportError:
pass # caught by imported AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-def build_target_specification(target_tracking_config):
+def build_target_specification(target_tracking_config):
# Initialize an empty dict() for building TargetTrackingConfiguration policies,
# which will be returned
targetTrackingConfig = dict()
- if target_tracking_config.get('target_value'):
- targetTrackingConfig['TargetValue'] = target_tracking_config['target_value']
+ if target_tracking_config.get("target_value"):
+ targetTrackingConfig["TargetValue"] = target_tracking_config["target_value"]
- if target_tracking_config.get('disable_scalein'):
- targetTrackingConfig['DisableScaleIn'] = target_tracking_config['disable_scalein']
+ if target_tracking_config.get("disable_scalein"):
+ targetTrackingConfig["DisableScaleIn"] = target_tracking_config["disable_scalein"]
else:
# Accounting for boto3 response
- targetTrackingConfig['DisableScaleIn'] = False
+ targetTrackingConfig["DisableScaleIn"] = False
- if target_tracking_config['predefined_metric_spec'] is not None:
+ if target_tracking_config["predefined_metric_spec"] is not None:
# Build spec for predefined_metric_spec
- targetTrackingConfig['PredefinedMetricSpecification'] = dict()
- if target_tracking_config['predefined_metric_spec'].get('predefined_metric_type'):
- targetTrackingConfig['PredefinedMetricSpecification']['PredefinedMetricType'] = \
- target_tracking_config['predefined_metric_spec']['predefined_metric_type']
-
- if target_tracking_config['predefined_metric_spec'].get('resource_label'):
- targetTrackingConfig['PredefinedMetricSpecification']['ResourceLabel'] = \
- target_tracking_config['predefined_metric_spec']['resource_label']
-
- elif target_tracking_config['customized_metric_spec'] is not None:
+ targetTrackingConfig["PredefinedMetricSpecification"] = dict()
+ if target_tracking_config["predefined_metric_spec"].get("predefined_metric_type"):
+ targetTrackingConfig["PredefinedMetricSpecification"]["PredefinedMetricType"] = target_tracking_config[
+ "predefined_metric_spec"
+ ]["predefined_metric_type"]
+
+ if target_tracking_config["predefined_metric_spec"].get("resource_label"):
+ targetTrackingConfig["PredefinedMetricSpecification"]["ResourceLabel"] = target_tracking_config[
+ "predefined_metric_spec"
+ ]["resource_label"]
+
+ elif target_tracking_config["customized_metric_spec"] is not None:
# Build spec for customized_metric_spec
- targetTrackingConfig['CustomizedMetricSpecification'] = dict()
- if target_tracking_config['customized_metric_spec'].get('metric_name'):
- targetTrackingConfig['CustomizedMetricSpecification']['MetricName'] = \
- target_tracking_config['customized_metric_spec']['metric_name']
-
- if target_tracking_config['customized_metric_spec'].get('namespace'):
- targetTrackingConfig['CustomizedMetricSpecification']['Namespace'] = \
- target_tracking_config['customized_metric_spec']['namespace']
-
- if target_tracking_config['customized_metric_spec'].get('dimensions'):
- targetTrackingConfig['CustomizedMetricSpecification']['Dimensions'] = \
- target_tracking_config['customized_metric_spec']['dimensions']
-
- if target_tracking_config['customized_metric_spec'].get('statistic'):
- targetTrackingConfig['CustomizedMetricSpecification']['Statistic'] = \
- target_tracking_config['customized_metric_spec']['statistic']
-
- if target_tracking_config['customized_metric_spec'].get('unit'):
- targetTrackingConfig['CustomizedMetricSpecification']['Unit'] = \
- target_tracking_config['customized_metric_spec']['unit']
+ targetTrackingConfig["CustomizedMetricSpecification"] = dict()
+ if target_tracking_config["customized_metric_spec"].get("metric_name"):
+ targetTrackingConfig["CustomizedMetricSpecification"]["MetricName"] = target_tracking_config[
+ "customized_metric_spec"
+ ]["metric_name"]
+
+ if target_tracking_config["customized_metric_spec"].get("namespace"):
+ targetTrackingConfig["CustomizedMetricSpecification"]["Namespace"] = target_tracking_config[
+ "customized_metric_spec"
+ ]["namespace"]
+
+ if target_tracking_config["customized_metric_spec"].get("dimensions"):
+ targetTrackingConfig["CustomizedMetricSpecification"]["Dimensions"] = target_tracking_config[
+ "customized_metric_spec"
+ ]["dimensions"]
+
+ if target_tracking_config["customized_metric_spec"].get("statistic"):
+ targetTrackingConfig["CustomizedMetricSpecification"]["Statistic"] = target_tracking_config[
+ "customized_metric_spec"
+ ]["statistic"]
+
+ if target_tracking_config["customized_metric_spec"].get("unit"):
+ targetTrackingConfig["CustomizedMetricSpecification"]["Unit"] = target_tracking_config[
+ "customized_metric_spec"
+ ]["unit"]
return targetTrackingConfig
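
To make the mapping concrete, a hedged input/output pair for build_target_specification, mirroring the predefined-metric example in EXAMPLES above; the return value is inferred from the code, not captured from a run.

    cfg = {
        "target_value": 98.0,
        "disable_scalein": None,
        "predefined_metric_spec": {"predefined_metric_type": "ASGAverageCPUUtilization"},
        "customized_metric_spec": None,
    }
    # build_target_specification(cfg) would return roughly:
    # {"TargetValue": 98.0,
    #  "DisableScaleIn": False,
    #  "PredefinedMetricSpecification": {"PredefinedMetricType": "ASGAverageCPUUtilization"}}
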
def create_scaling_policy(connection, module):
changed = False
- asg_name = module.params['asg_name']
- policy_type = module.params['policy_type']
- policy_name = module.params['name']
-
- if policy_type == 'TargetTrackingScaling':
- params = dict(PolicyName=policy_name,
- PolicyType=policy_type,
- AutoScalingGroupName=asg_name)
+ asg_name = module.params["asg_name"]
+ policy_type = module.params["policy_type"]
+ policy_name = module.params["name"]
+
+ if policy_type == "TargetTrackingScaling":
+ params = dict(PolicyName=policy_name, PolicyType=policy_type, AutoScalingGroupName=asg_name)
else:
- params = dict(PolicyName=policy_name,
- PolicyType=policy_type,
- AutoScalingGroupName=asg_name,
- AdjustmentType=module.params['adjustment_type'])
+ params = dict(
+ PolicyName=policy_name,
+ PolicyType=policy_type,
+ AutoScalingGroupName=asg_name,
+ AdjustmentType=module.params["adjustment_type"],
+ )
# min_adjustment_step attribute is only relevant if the adjustment_type
# is set to percentage change in capacity, so it is a special case
- if module.params['adjustment_type'] == 'PercentChangeInCapacity':
- if module.params['min_adjustment_step']:
- params['MinAdjustmentMagnitude'] = module.params['min_adjustment_step']
+ if module.params["adjustment_type"] == "PercentChangeInCapacity":
+ if module.params["min_adjustment_step"]:
+ params["MinAdjustmentMagnitude"] = module.params["min_adjustment_step"]
- if policy_type == 'SimpleScaling':
+ if policy_type == "SimpleScaling":
# can't use required_if because it doesn't allow multiple criteria -
# it's only required if policy is SimpleScaling and state is present
- if not module.params['scaling_adjustment']:
- module.fail_json(msg='scaling_adjustment is required when policy_type is SimpleScaling '
- 'and state is present')
- params['ScalingAdjustment'] = module.params['scaling_adjustment']
- if module.params['cooldown']:
- params['Cooldown'] = module.params['cooldown']
-
- elif policy_type == 'StepScaling':
- if not module.params['step_adjustments']:
- module.fail_json(msg='step_adjustments is required when policy_type is StepScaling'
- 'and state is present')
- params['StepAdjustments'] = []
- for step_adjustment in module.params['step_adjustments']:
- step_adjust_params = dict(
- ScalingAdjustment=step_adjustment['scaling_adjustment'])
- if step_adjustment.get('lower_bound'):
- step_adjust_params['MetricIntervalLowerBound'] = step_adjustment['lower_bound']
- if step_adjustment.get('upper_bound'):
- step_adjust_params['MetricIntervalUpperBound'] = step_adjustment['upper_bound']
- params['StepAdjustments'].append(step_adjust_params)
- if module.params['metric_aggregation']:
- params['MetricAggregationType'] = module.params['metric_aggregation']
- if module.params['estimated_instance_warmup']:
- params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup']
-
- elif policy_type == 'TargetTrackingScaling':
- if not module.params['target_tracking_config']:
- module.fail_json(msg='target_tracking_config is required when policy_type is '
- 'TargetTrackingScaling and state is present')
+ if not module.params["scaling_adjustment"]:
+ module.fail_json(
+ msg="scaling_adjustment is required when policy_type is SimpleScaling and state is present"
+ )
+ params["ScalingAdjustment"] = module.params["scaling_adjustment"]
+ if module.params["cooldown"]:
+ params["Cooldown"] = module.params["cooldown"]
+
+ elif policy_type == "StepScaling":
+ if not module.params["step_adjustments"]:
+ module.fail_json(msg="step_adjustments is required when policy_type is StepScaling and state is present")
+ params["StepAdjustments"] = []
+ for step_adjustment in module.params["step_adjustments"]:
+ step_adjust_params = dict(ScalingAdjustment=step_adjustment["scaling_adjustment"])
+ if step_adjustment.get("lower_bound"):
+ step_adjust_params["MetricIntervalLowerBound"] = step_adjustment["lower_bound"]
+ if step_adjustment.get("upper_bound"):
+ step_adjust_params["MetricIntervalUpperBound"] = step_adjustment["upper_bound"]
+ params["StepAdjustments"].append(step_adjust_params)
+ if module.params["metric_aggregation"]:
+ params["MetricAggregationType"] = module.params["metric_aggregation"]
+ if module.params["estimated_instance_warmup"]:
+ params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"]
+
+ elif policy_type == "TargetTrackingScaling":
+ if not module.params["target_tracking_config"]:
+ module.fail_json(
+ msg="target_tracking_config is required when policy_type is TargetTrackingScaling and state is present"
+ )
else:
- params['TargetTrackingConfiguration'] = build_target_specification(module.params.get('target_tracking_config'))
- if module.params['estimated_instance_warmup']:
- params['EstimatedInstanceWarmup'] = module.params['estimated_instance_warmup']
+ params["TargetTrackingConfiguration"] = build_target_specification(
+ module.params.get("target_tracking_config")
+ )
+ if module.params["estimated_instance_warmup"]:
+ params["EstimatedInstanceWarmup"] = module.params["estimated_instance_warmup"]
# Ensure idempotency with policies
try:
- policies = connection.describe_policies(aws_retry=True,
- AutoScalingGroupName=asg_name,
- PolicyNames=[policy_name])['ScalingPolicies']
+ policies = connection.describe_policies(
+ aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name]
+ )["ScalingPolicies"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg="Failed to obtain autoscaling policy %s" % policy_name)
+ module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}")
before = after = {}
if not policies:
@@ -499,41 +507,39 @@ def create_scaling_policy(connection, module):
module.fail_json_aws(e, msg="Failed to create autoscaling policy")
try:
- policies = connection.describe_policies(aws_retry=True,
- AutoScalingGroupName=asg_name,
- PolicyNames=[policy_name])['ScalingPolicies']
+ policies = connection.describe_policies(
+ aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name]
+ )["ScalingPolicies"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg="Failed to obtain autoscaling policy %s" % policy_name)
+ module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}")
policy = camel_dict_to_snake_dict(policies[0])
# Backward compatible return values
- policy['arn'] = policy['policy_arn']
- policy['as_name'] = policy['auto_scaling_group_name']
- policy['name'] = policy['policy_name']
+ policy["arn"] = policy["policy_arn"]
+ policy["as_name"] = policy["auto_scaling_group_name"]
+ policy["name"] = policy["policy_name"]
if before and after:
- module.exit_json(changed=changed, diff=dict(
- before=before, after=after), **policy)
+ module.exit_json(changed=changed, diff=dict(before=before, after=after), **policy)
else:
module.exit_json(changed=changed, **policy)
def delete_scaling_policy(connection, module):
- policy_name = module.params.get('name')
+ policy_name = module.params.get("name")
try:
- policy = connection.describe_policies(
- aws_retry=True, PolicyNames=[policy_name])
+ policy = connection.describe_policies(aws_retry=True, PolicyNames=[policy_name])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg="Failed to obtain autoscaling policy %s" % policy_name)
+ module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}")
- if policy['ScalingPolicies']:
+ if policy["ScalingPolicies"]:
try:
- connection.delete_policy(aws_retry=True,
- AutoScalingGroupName=policy['ScalingPolicies'][0]['AutoScalingGroupName'],
- PolicyName=policy_name)
+ connection.delete_policy(
+ aws_retry=True,
+ AutoScalingGroupName=policy["ScalingPolicies"][0]["AutoScalingGroupName"],
+ PolicyName=policy_name,
+ )
module.exit_json(changed=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to delete autoscaling policy")
@@ -543,65 +549,62 @@ def delete_scaling_policy(connection, module):
def main():
step_adjustment_spec = dict(
- lower_bound=dict(type='int'),
- upper_bound=dict(type='int'),
- scaling_adjustment=dict(type='int', required=True)
+ lower_bound=dict(type="int"), upper_bound=dict(type="int"), scaling_adjustment=dict(type="int", required=True)
)
predefined_metric_spec = dict(
- predefined_metric_type=dict(type='str', choices=['ASGAverageCPUUtilization',
- 'ASGAverageNetworkIn',
- 'ASGAverageNetworkOut',
- 'ALBRequestCountPerTarget'], required=True),
- resource_label=dict(type='str')
+ predefined_metric_type=dict(
+ type="str",
+ choices=[
+ "ASGAverageCPUUtilization",
+ "ASGAverageNetworkIn",
+ "ASGAverageNetworkOut",
+ "ALBRequestCountPerTarget",
+ ],
+ required=True,
+ ),
+ resource_label=dict(type="str"),
)
customized_metric_spec = dict(
- metric_name=dict(type='str', required=True),
- namespace=dict(type='str', required=True),
- statistic=dict(type='str', required=True, choices=['Average', 'Minimum', 'Maximum', 'SampleCount', 'Sum']),
- dimensions=dict(type='list', elements='dict'),
- unit=dict(type='str')
+ metric_name=dict(type="str", required=True),
+ namespace=dict(type="str", required=True),
+ statistic=dict(type="str", required=True, choices=["Average", "Minimum", "Maximum", "SampleCount", "Sum"]),
+ dimensions=dict(type="list", elements="dict"),
+ unit=dict(type="str"),
)
target_tracking_spec = dict(
- disable_scalein=dict(type='bool'),
- target_value=dict(type='float', required=True),
- predefined_metric_spec=dict(type='dict',
- options=predefined_metric_spec),
- customized_metric_spec=dict(type='dict',
- options=customized_metric_spec)
+ disable_scalein=dict(type="bool"),
+ target_value=dict(type="float", required=True),
+ predefined_metric_spec=dict(type="dict", options=predefined_metric_spec),
+ customized_metric_spec=dict(type="dict", options=customized_metric_spec),
)
argument_spec = dict(
name=dict(required=True),
- adjustment_type=dict(choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
+ adjustment_type=dict(choices=["ChangeInCapacity", "ExactCapacity", "PercentChangeInCapacity"]),
asg_name=dict(),
- scaling_adjustment=dict(type='int'),
- min_adjustment_step=dict(type='int'),
- cooldown=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent']),
- metric_aggregation=dict(default='Average', choices=[
- 'Minimum', 'Maximum', 'Average']),
- policy_type=dict(default='SimpleScaling', choices=[
- 'SimpleScaling', 'StepScaling', 'TargetTrackingScaling']),
- target_tracking_config=dict(type='dict', options=target_tracking_spec),
- step_adjustments=dict(
- type='list', options=step_adjustment_spec, elements='dict'),
- estimated_instance_warmup=dict(type='int')
+ scaling_adjustment=dict(type="int"),
+ min_adjustment_step=dict(type="int"),
+ cooldown=dict(type="int"),
+ state=dict(default="present", choices=["present", "absent"]),
+ metric_aggregation=dict(default="Average", choices=["Minimum", "Maximum", "Average"]),
+ policy_type=dict(default="SimpleScaling", choices=["SimpleScaling", "StepScaling", "TargetTrackingScaling"]),
+ target_tracking_config=dict(type="dict", options=target_tracking_spec),
+ step_adjustments=dict(type="list", options=step_adjustment_spec, elements="dict"),
+ estimated_instance_warmup=dict(type="int"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ['asg_name']]])
+ module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["asg_name"]]])
- connection = module.client(
- 'autoscaling', retry_decorator=AWSRetry.jittered_backoff())
- state = module.params.get('state')
+ connection = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff())
+ state = module.params.get("state")
- if state == 'present':
+ if state == "present":
create_scaling_policy(connection, module)
- elif state == 'absent':
+ elif state == "absent":
delete_scaling_policy(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
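A minimal sketch of how the nested target-tracking options above translate into the TargetTrackingConfiguration payload that autoscaling.put_scaling_policy expects; plain Python with a hypothetical helper name, not the module's actual code:

def build_target_tracking_config(params):
    # Map the module's snake_case options onto the boto3 TargetTrackingConfiguration
    # shape; key names on the right-hand side follow the AWS API.
    config = {"TargetValue": params["target_value"]}
    if params.get("disable_scalein") is not None:
        config["DisableScaleIn"] = params["disable_scalein"]
    predefined = params.get("predefined_metric_spec")
    if predefined:
        spec = {"PredefinedMetricType": predefined["predefined_metric_type"]}
        if predefined.get("resource_label"):
            spec["ResourceLabel"] = predefined["resource_label"]
        config["PredefinedMetricSpecification"] = spec
    return config

print(build_target_tracking_config({
    "target_value": 50.0,
    "disable_scalein": False,
    "predefined_metric_spec": {"predefined_metric_type": "ASGAverageCPUUtilization"},
}))
# {'TargetValue': 50.0, 'DisableScaleIn': False,
#  'PredefinedMetricSpecification': {'PredefinedMetricType': 'ASGAverageCPUUtilization'}}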
diff --git a/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py b/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py
index f1433c522..9bfb70b83 100644
--- a/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py
+++ b/ansible_collections/community/aws/plugins/modules/autoscaling_scheduled_action.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -6,10 +7,7 @@
# Based off of https://github.com/mmochan/ansible-aws-ec2-asg-scheduled-actions/blob/master/library/ec2_asg_scheduled_action.py
# (c) 2016, Mike Mochan <@mmochan>
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: autoscaling_scheduled_action
version_added: 2.2.0
@@ -67,14 +65,15 @@ options:
required: false
default: present
choices: ['present', 'absent']
-author: Mark Woolley(@marknet15)
+author:
+  - Mark Woolley (@marknet15)
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Create a scheduled action for an autoscaling group.
- name: Create a minimal scheduled action for autoscaling group
community.aws.autoscaling_scheduled_action:
@@ -108,9 +107,9 @@ EXAMPLES = r'''
autoscaling_group_name: test_asg
scheduled_action_name: test_scheduled_action
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
scheduled_action_name:
description: The name of the scheduled action.
returned: when I(state=present)
@@ -151,7 +150,7 @@ desired_capacity:
returned: when I(state=present)
type: int
sample: 1
-'''
+"""
try:
import botocore
@@ -160,39 +159,41 @@ except ImportError:
try:
from dateutil.parser import parse as timedate_parse
+
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def format_request():
params = dict(
- AutoScalingGroupName=module.params.get('autoscaling_group_name'),
- ScheduledActionName=module.params.get('scheduled_action_name'),
- Recurrence=module.params.get('recurrence')
+ AutoScalingGroupName=module.params.get("autoscaling_group_name"),
+ ScheduledActionName=module.params.get("scheduled_action_name"),
+ Recurrence=module.params.get("recurrence"),
)
# Some of these params are optional
- if module.params.get('desired_capacity') is not None:
- params['DesiredCapacity'] = module.params.get('desired_capacity')
+ if module.params.get("desired_capacity") is not None:
+ params["DesiredCapacity"] = module.params.get("desired_capacity")
- if module.params.get('min_size') is not None:
- params['MinSize'] = module.params.get('min_size')
+ if module.params.get("min_size") is not None:
+ params["MinSize"] = module.params.get("min_size")
- if module.params.get('max_size') is not None:
- params['MaxSize'] = module.params.get('max_size')
+ if module.params.get("max_size") is not None:
+ params["MaxSize"] = module.params.get("max_size")
- if module.params.get('time_zone') is not None:
- params['TimeZone'] = module.params.get('time_zone')
+ if module.params.get("time_zone") is not None:
+ params["TimeZone"] = module.params.get("time_zone")
- if module.params.get('start_time') is not None:
- params['StartTime'] = module.params.get('start_time')
+ if module.params.get("start_time") is not None:
+ params["StartTime"] = module.params.get("start_time")
- if module.params.get('end_time') is not None:
- params['EndTime'] = module.params.get('end_time')
+ if module.params.get("end_time") is not None:
+ params["EndTime"] = module.params.get("end_time")
return params
@@ -205,8 +206,8 @@ def delete_scheduled_action(current_actions):
return True
params = dict(
- AutoScalingGroupName=module.params.get('autoscaling_group_name'),
- ScheduledActionName=module.params.get('scheduled_action_name')
+ AutoScalingGroupName=module.params.get("autoscaling_group_name"),
+ ScheduledActionName=module.params.get("scheduled_action_name"),
)
try:
@@ -219,8 +220,8 @@ def delete_scheduled_action(current_actions):
def get_scheduled_actions():
params = dict(
- AutoScalingGroupName=module.params.get('autoscaling_group_name'),
- ScheduledActionNames=[module.params.get('scheduled_action_name')]
+ AutoScalingGroupName=module.params.get("autoscaling_group_name"),
+ ScheduledActionNames=[module.params.get("scheduled_action_name")],
)
try:
@@ -270,55 +271,53 @@ def main():
global client
argument_spec = dict(
- autoscaling_group_name=dict(required=True, type='str'),
- scheduled_action_name=dict(required=True, type='str'),
- start_time=dict(default=None, type='str'),
- end_time=dict(default=None, type='str'),
- time_zone=dict(default=None, type='str'),
- recurrence=dict(type='str'),
- min_size=dict(default=None, type='int'),
- max_size=dict(default=None, type='int'),
- desired_capacity=dict(default=None, type='int'),
- state=dict(default='present', choices=['present', 'absent'])
+ autoscaling_group_name=dict(required=True, type="str"),
+ scheduled_action_name=dict(required=True, type="str"),
+ start_time=dict(default=None, type="str"),
+ end_time=dict(default=None, type="str"),
+ time_zone=dict(default=None, type="str"),
+ recurrence=dict(type="str"),
+ min_size=dict(default=None, type="int"),
+ max_size=dict(default=None, type="int"),
+ desired_capacity=dict(default=None, type="int"),
+ state=dict(default="present", choices=["present", "absent"]),
)
module = AnsibleAWSModule(
- argument_spec=argument_spec,
- required_if=[['state', 'present', ['recurrence']]],
- supports_check_mode=True
+ argument_spec=argument_spec, required_if=[["state", "present", ["recurrence"]]], supports_check_mode=True
)
if not HAS_DATEUTIL:
- module.fail_json(msg='dateutil is required for this module')
+ module.fail_json(msg="dateutil is required for this module")
if not module.botocore_at_least("1.20.24"):
- module.fail_json(msg='botocore version >= 1.20.24 is required for this module')
+ module.fail_json(msg="botocore version >= 1.20.24 is required for this module")
- client = module.client('autoscaling', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("autoscaling", retry_decorator=AWSRetry.jittered_backoff())
current_actions = get_scheduled_actions()
- state = module.params.get('state')
+ state = module.params.get("state")
results = dict()
- if state == 'present':
+ if state == "present":
changed = put_scheduled_update_group_action(current_actions)
if not module.check_mode:
updated_action = get_scheduled_actions()[0]
results = dict(
- scheduled_action_name=updated_action.get('ScheduledActionName'),
- start_time=updated_action.get('StartTime'),
- end_time=updated_action.get('EndTime'),
- time_zone=updated_action.get('TimeZone'),
- recurrence=updated_action.get('Recurrence'),
- min_size=updated_action.get('MinSize'),
- max_size=updated_action.get('MaxSize'),
- desired_capacity=updated_action.get('DesiredCapacity')
+ scheduled_action_name=updated_action.get("ScheduledActionName"),
+ start_time=updated_action.get("StartTime"),
+ end_time=updated_action.get("EndTime"),
+ time_zone=updated_action.get("TimeZone"),
+ recurrence=updated_action.get("Recurrence"),
+ min_size=updated_action.get("MinSize"),
+ max_size=updated_action.get("MaxSize"),
+ desired_capacity=updated_action.get("DesiredCapacity"),
)
else:
changed = delete_scheduled_action(current_actions)
- results['changed'] = changed
+ results["changed"] = changed
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
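A sketch of the optional-parameter pattern that format_request() above uses: required keys are always present, and an optional key is added only when the module option is not None (plain dicts stand in for module.params; the helper name is hypothetical):

def build_scheduled_action_request(opts):
    # Required fields first; AWS API casing on the left, module option names on the right.
    params = {
        "AutoScalingGroupName": opts["autoscaling_group_name"],
        "ScheduledActionName": opts["scheduled_action_name"],
        "Recurrence": opts["recurrence"],
    }
    optional = {
        "DesiredCapacity": "desired_capacity",
        "MinSize": "min_size",
        "MaxSize": "max_size",
        "TimeZone": "time_zone",
        "StartTime": "start_time",
        "EndTime": "end_time",
    }
    for api_key, opt_key in optional.items():
        if opts.get(opt_key) is not None:  # None means "caller did not set it"
            params[api_key] = opts[opt_key]
    return params

print(build_scheduled_action_request({
    "autoscaling_group_name": "test_asg",
    "scheduled_action_name": "nightly-scale-down",
    "recurrence": "0 1 * * *",
    "desired_capacity": 0,
    "min_size": None, "max_size": None, "time_zone": None,
    "start_time": None, "end_time": None,
}))
# Note DesiredCapacity=0 survives: the check is "is not None", not truthiness.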
diff --git a/ansible_collections/community/aws/plugins/modules/aws_region_info.py b/ansible_collections/community/aws/plugins/modules/aws_region_info.py
deleted file mode 100644
index 126455a8c..000000000
--- a/ansible_collections/community/aws/plugins/modules/aws_region_info.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: aws_region_info
-short_description: Gather information about AWS regions
-version_added: 1.0.0
-description:
- - Gather information about AWS regions.
-author:
- - 'Henrique Rodrigues (@Sodki)'
-options:
- filters:
- description:
- - A dict of filters to apply.
- - Each dict item consists of a filter key and a filter value.
- - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for possible filters.
- - Filter names and values are case sensitive.
- - You can use underscores instead of dashes (-) in the filter keys.
- - Filter keys with underscores will take precedence in case of conflict.
- default: {}
- type: dict
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all regions
-- community.aws.aws_region_info:
-
-# Gather information about a single region
-- community.aws.aws_region_info:
- filters:
- region-name: eu-west-1
-'''
-
-RETURN = '''
-regions:
- returned: on success
- description: >
- Regions that match the provided filters. Each element consists of a dict with all the information related
- to that region.
- type: list
- sample: "[{
- 'endpoint': 'ec2.us-west-1.amazonaws.com',
- 'region_name': 'us-west-1'
- }]"
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-def main():
- argument_spec = dict(
- filters=dict(default={}, type='dict')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
-
- # Replace filter key underscores with dashes, for compatibility
- sanitized_filters = dict(module.params.get('filters'))
- for k in module.params.get('filters').keys():
- if "_" in k:
- sanitized_filters[k.replace('_', '-')] = sanitized_filters[k]
- del sanitized_filters[k]
-
- try:
- regions = connection.describe_regions(
- aws_retry=True,
- Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to describe regions.")
-
- module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']])
-
-
-if __name__ == '__main__':
- main()
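The module is deleted here, presumably as part of its migration into the amazon.aws collection. Its one non-obvious behaviour, rewriting underscore filter keys to dashes with the underscore key winning on conflict, can be sketched as follows (to_boto3_filter_list is a simplified stand-in for the real ansible_dict_to_boto3_filter_list helper):

def sanitize_filters(filters):
    # Replace underscores with dashes in filter keys; an underscore key overwrites
    # any pre-existing dash key, matching the removed module's behaviour.
    sanitized = dict(filters)
    for key in list(filters):
        if "_" in key:
            sanitized[key.replace("_", "-")] = sanitized.pop(key)
    return sanitized

def to_boto3_filter_list(filters):
    # Rough stand-in for amazon.aws's ansible_dict_to_boto3_filter_list.
    return [{"Name": name, "Values": vals if isinstance(vals, list) else [vals]}
            for name, vals in filters.items()]

print(to_boto3_filter_list(sanitize_filters({"region_name": "eu-west-1"})))
# [{'Name': 'region-name', 'Values': ['eu-west-1']}]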
diff --git a/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py b/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py
index 555cfccbe..e9a17f9a0 100644
--- a/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py
+++ b/ansible_collections/community/aws/plugins/modules/batch_compute_environment.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: batch_compute_environment
version_added: 1.0.0
@@ -120,12 +118,12 @@ options:
- The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment.
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: My Batch Compute Environment
community.aws.batch_compute_environment:
compute_environment_name: computeEnvironmentName
@@ -155,9 +153,9 @@ EXAMPLES = r'''
- name: show results
ansible.builtin.debug:
var: aws_batch_compute_environment_action
-'''
+"""
-RETURN = r'''
+RETURN = r"""
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
@@ -167,15 +165,15 @@ output:
changed: false
invocation:
module_args:
- aws_access_key: ~
- aws_secret_key: ~
+ access_key: ~
+ secret_key: ~
bid_percentage: ~
compute_environment_name: <name>
compute_environment_state: ENABLED
compute_resource_type: EC2
desiredv_cpus: 0
ec2_key_pair: ~
- ec2_url: ~
+ endpoint_url: ~
image_id: ~
instance_role: "arn:aws:iam::..."
instance_types:
@@ -222,17 +220,22 @@ output:
statusReason: "ComputeEnvironment Healthy"
type: MANAGED
type: dict
-'''
+"""
import re
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
# ---------------------------------------------------------------------------------------------------
#
@@ -240,6 +243,7 @@ except ImportError:
#
# ---------------------------------------------------------------------------------------------------
+
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
@@ -260,18 +264,16 @@ def validate_params(module):
:return:
"""
- compute_environment_name = module.params['compute_environment_name']
+ compute_environment_name = module.params["compute_environment_name"]
# validate compute environment name
- if not re.search(r'^[\w\_:]+$', compute_environment_name):
+ if not re.search(r"^[\w\_:]+$", compute_environment_name):
module.fail_json(
- msg="Function compute_environment_name {0} is invalid. Names must contain only alphanumeric characters "
- "and underscores.".format(compute_environment_name)
+ msg=f"Function compute_environment_name {compute_environment_name} is invalid. Names must contain only alphanumeric characters and underscores."
)
- if not compute_environment_name.startswith('arn:aws:batch:'):
+ if not validate_aws_arn(compute_environment_name, service="batch"):
if len(compute_environment_name) > 128:
- module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
- .format(compute_environment_name))
+ module.fail_json(msg=f'compute_environment_name "{compute_environment_name}" exceeds 128 character limit')
return
@@ -282,13 +284,14 @@ def validate_params(module):
#
# ---------------------------------------------------------------------------------------------------
+
def get_current_compute_environment(module, client):
try:
environments = client.describe_compute_environments(
- computeEnvironments=[module.params['compute_environment_name']]
+ computeEnvironments=[module.params["compute_environment_name"]]
)
- if len(environments['computeEnvironments']) > 0:
- return environments['computeEnvironments'][0]
+ if len(environments["computeEnvironments"]) > 0:
+ return environments["computeEnvironments"][0]
else:
return None
except ClientError:
@@ -297,42 +300,52 @@ def get_current_compute_environment(module, client):
def create_compute_environment(module, client):
"""
- Adds a Batch compute environment
+ Adds a Batch compute environment
- :param module:
- :param client:
- :return:
- """
+ :param module:
+ :param client:
+ :return:
+ """
changed = False
# set API parameters
- params = (
- 'compute_environment_name', 'type', 'service_role')
+ params = ("compute_environment_name", "type", "service_role")
api_params = set_api_params(module, params)
- if module.params['compute_environment_state'] is not None:
- api_params['state'] = module.params['compute_environment_state']
-
- compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
- 'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
- 'spot_iam_fleet_role')
+ if module.params["compute_environment_state"] is not None:
+ api_params["state"] = module.params["compute_environment_state"]
+
+ compute_resources_param_list = (
+ "minv_cpus",
+ "maxv_cpus",
+ "desiredv_cpus",
+ "instance_types",
+ "image_id",
+ "subnets",
+ "security_group_ids",
+ "ec2_key_pair",
+ "instance_role",
+ "tags",
+ "bid_percentage",
+ "spot_iam_fleet_role",
+ )
compute_resources_params = set_api_params(module, compute_resources_param_list)
- if module.params['compute_resource_type'] is not None:
- compute_resources_params['type'] = module.params['compute_resource_type']
+ if module.params["compute_resource_type"] is not None:
+ compute_resources_params["type"] = module.params["compute_resource_type"]
# if module.params['minv_cpus'] is not None:
# compute_resources_params['minvCpus'] = module.params['minv_cpus']
- api_params['computeResources'] = compute_resources_params
+ api_params["computeResources"] = compute_resources_params
try:
if not module.check_mode:
client.create_compute_environment(**api_params)
changed = True
except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg='Error creating compute environment')
+ module.fail_json_aws(e, msg="Error creating compute environment")
return changed
@@ -349,29 +362,29 @@ def remove_compute_environment(module, client):
changed = False
# set API parameters
- api_params = {'computeEnvironment': module.params['compute_environment_name']}
+ api_params = {"computeEnvironment": module.params["compute_environment_name"]}
try:
if not module.check_mode:
client.delete_compute_environment(**api_params)
changed = True
except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg='Error removing compute environment')
+ module.fail_json_aws(e, msg="Error removing compute environment")
return changed
def manage_state(module, client):
changed = False
- current_state = 'absent'
- state = module.params['state']
- compute_environment_state = module.params['compute_environment_state']
- compute_environment_name = module.params['compute_environment_name']
- service_role = module.params['service_role']
- minv_cpus = module.params['minv_cpus']
- maxv_cpus = module.params['maxv_cpus']
- desiredv_cpus = module.params['desiredv_cpus']
- action_taken = 'none'
- update_env_response = ''
+ current_state = "absent"
+ state = module.params["state"]
+ compute_environment_state = module.params["compute_environment_state"]
+ compute_environment_name = module.params["compute_environment_name"]
+ service_role = module.params["service_role"]
+ minv_cpus = module.params["minv_cpus"]
+ maxv_cpus = module.params["maxv_cpus"]
+ desiredv_cpus = module.params["desiredv_cpus"]
+ action_taken = "none"
+ update_env_response = ""
check_mode = module.check_mode
@@ -379,37 +392,40 @@ def manage_state(module, client):
current_compute_environment = get_current_compute_environment(module, client)
response = current_compute_environment
if current_compute_environment:
- current_state = 'present'
+ current_state = "present"
- if state == 'present':
- if current_state == 'present':
+ if state == "present":
+ if current_state == "present":
updates = False
# Update Batch Compute Environment configuration
- compute_kwargs = {'computeEnvironment': compute_environment_name}
+ compute_kwargs = {"computeEnvironment": compute_environment_name}
# Update configuration if needed
compute_resources = {}
- if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
- compute_kwargs.update({'state': compute_environment_state})
+ if compute_environment_state and current_compute_environment["state"] != compute_environment_state:
+ compute_kwargs.update({"state": compute_environment_state})
updates = True
- if service_role and current_compute_environment['serviceRole'] != service_role:
- compute_kwargs.update({'serviceRole': service_role})
+ if service_role and current_compute_environment["serviceRole"] != service_role:
+ compute_kwargs.update({"serviceRole": service_role})
updates = True
- if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
- compute_resources['minvCpus'] = minv_cpus
- if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
- compute_resources['maxvCpus'] = maxv_cpus
- if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
- compute_resources['desiredvCpus'] = desiredv_cpus
+ if minv_cpus is not None and current_compute_environment["computeResources"]["minvCpus"] != minv_cpus:
+ compute_resources["minvCpus"] = minv_cpus
+ if maxv_cpus is not None and current_compute_environment["computeResources"]["maxvCpus"] != maxv_cpus:
+ compute_resources["maxvCpus"] = maxv_cpus
+ if (
+ desiredv_cpus is not None
+ and current_compute_environment["computeResources"]["desiredvCpus"] != desiredv_cpus
+ ):
+ compute_resources["desiredvCpus"] = desiredv_cpus
if len(compute_resources) > 0:
- compute_kwargs['computeResources'] = compute_resources
+ compute_kwargs["computeResources"] = compute_resources
updates = True
if updates:
try:
if not check_mode:
update_env_response = client.update_compute_environment(**compute_kwargs)
if not update_env_response:
- module.fail_json(msg='Unable to get compute environment information after creating')
+ module.fail_json(msg="Unable to get compute environment information after creating")
changed = True
action_taken = "updated"
except (BotoCoreError, ClientError) as e:
@@ -419,15 +435,15 @@ def manage_state(module, client):
# Create Batch Compute Environment
changed = create_compute_environment(module, client)
# Describe compute environment
- action_taken = 'added'
+ action_taken = "added"
response = get_current_compute_environment(module, client)
if not response:
- module.fail_json(msg='Unable to get compute environment information after creating')
+ module.fail_json(msg="Unable to get compute environment information after creating")
else:
- if current_state == 'present':
+ if current_state == "present":
# remove the compute environment
changed = remove_compute_environment(module, client)
- action_taken = 'deleted'
+ action_taken = "deleted"
return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
@@ -437,6 +453,7 @@ def manage_state(module, client):
#
# ---------------------------------------------------------------------------------------------------
+
def main():
"""
Main entry point.
@@ -445,39 +462,36 @@ def main():
"""
argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
+ state=dict(default="present", choices=["present", "absent"]),
compute_environment_name=dict(required=True),
- type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
- compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
+ type=dict(required=True, choices=["MANAGED", "UNMANAGED"]),
+ compute_environment_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]),
service_role=dict(required=True),
- compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
- minv_cpus=dict(type='int', required=True),
- maxv_cpus=dict(type='int', required=True),
- desiredv_cpus=dict(type='int'),
- instance_types=dict(type='list', required=True, elements='str'),
+ compute_resource_type=dict(required=True, choices=["EC2", "SPOT"]),
+ minv_cpus=dict(type="int", required=True),
+ maxv_cpus=dict(type="int", required=True),
+ desiredv_cpus=dict(type="int"),
+ instance_types=dict(type="list", required=True, elements="str"),
image_id=dict(),
- subnets=dict(type='list', required=True, elements='str'),
- security_group_ids=dict(type='list', required=True, elements='str'),
+ subnets=dict(type="list", required=True, elements="str"),
+ security_group_ids=dict(type="list", required=True, elements="str"),
ec2_key_pair=dict(no_log=False),
instance_role=dict(required=True),
- tags=dict(type='dict'),
- bid_percentage=dict(type='int'),
+ tags=dict(type="dict"),
+ bid_percentage=dict(type="int"),
spot_iam_fleet_role=dict(),
)
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- client = module.client('batch')
+ client = module.client("batch")
validate_params(module)
results = manage_state(module, client)
- module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
+ module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=["Tags"]))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
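A sketch of what set_api_params() does in this module: drop unset options and re-key the rest from snake_case to the lowerCamelCase the Batch API uses (the real code delegates to snake_dict_to_camel_dict; the helper names below are hypothetical):

def snake_to_camel(name):
    # lower_snake_case -> lowerCamelCase, the casing the Batch API expects.
    head, *rest = name.split("_")
    return head + "".join(part.title() for part in rest)

def pick_api_params(module_params, param_names):
    # Keep only the parameters the user actually set, re-keyed for the boto3 call.
    return {
        snake_to_camel(name): module_params[name]
        for name in param_names
        if module_params.get(name) is not None
    }

print(pick_api_params(
    {"minv_cpus": 0, "maxv_cpus": 4, "desiredv_cpus": None},
    ("minv_cpus", "maxv_cpus", "desiredv_cpus"),
))
# {'minvCpus': 0, 'maxvCpus': 4}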
diff --git a/ansible_collections/community/aws/plugins/modules/batch_job_definition.py b/ansible_collections/community/aws/plugins/modules/batch_job_definition.py
index 79ace0534..fb2b1996d 100644
--- a/ansible_collections/community/aws/plugins/modules/batch_job_definition.py
+++ b/ansible_collections/community/aws/plugins/modules/batch_job_definition.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: batch_job_definition
version_added: 1.0.0
@@ -179,12 +177,12 @@ options:
many times.
type: int
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
---
- name: My Batch Job Definition
community.aws.batch_job_definition:
@@ -207,9 +205,9 @@ EXAMPLES = r'''
- name: show results
ansible.builtin.debug: var=job_definition_create_result
-'''
+"""
-RETURN = r'''
+RETURN = r"""
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
@@ -223,17 +221,20 @@ output:
status: INACTIVE
type: container
type: dict
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.batch import cc, set_api_params
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.batch import cc
+from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
# ---------------------------------------------------------------------------------------------------
#
@@ -263,15 +264,15 @@ def validate_params(module, batch_client):
#
# ---------------------------------------------------------------------------------------------------
+
def get_current_job_definition(module, batch_client):
try:
- environments = batch_client.describe_job_definitions(
- jobDefinitionName=module.params['job_definition_name']
- )
- if len(environments['jobDefinitions']) > 0:
- latest_revision = max(map(lambda d: d['revision'], environments['jobDefinitions']))
- latest_definition = next((x for x in environments['jobDefinitions'] if x['revision'] == latest_revision),
- None)
+ environments = batch_client.describe_job_definitions(jobDefinitionName=module.params["job_definition_name"])
+ if len(environments["jobDefinitions"]) > 0:
+ latest_revision = max(map(lambda d: d["revision"], environments["jobDefinitions"]))
+ latest_definition = next(
+ (x for x in environments["jobDefinitions"] if x["revision"] == latest_revision), None
+ )
return latest_definition
return None
except ClientError:
@@ -280,12 +281,12 @@ def get_current_job_definition(module, batch_client):
def create_job_definition(module, batch_client):
"""
- Adds a Batch job definition
+ Adds a Batch job definition
- :param module:
- :param batch_client:
- :return:
- """
+ :param module:
+ :param batch_client:
+ :return:
+ """
changed = False
@@ -294,36 +295,48 @@ def create_job_definition(module, batch_client):
container_properties_params = set_api_params(module, get_container_property_params())
retry_strategy_params = set_api_params(module, get_retry_strategy_params())
- api_params['retryStrategy'] = retry_strategy_params
- api_params['containerProperties'] = container_properties_params
+ api_params["retryStrategy"] = retry_strategy_params
+ api_params["containerProperties"] = container_properties_params
try:
if not module.check_mode:
batch_client.register_job_definition(**api_params)
changed = True
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Error registering job definition')
+ module.fail_json_aws(e, msg="Error registering job definition")
return changed
def get_retry_strategy_params():
- return ('attempts',)
+ return ("attempts",)
def get_container_property_params():
- return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes', 'environment', 'mount_points',
- 'readonly_root_filesystem', 'privileged', 'ulimits', 'user')
+ return (
+ "image",
+ "vcpus",
+ "memory",
+ "command",
+ "job_role_arn",
+ "volumes",
+ "environment",
+ "mount_points",
+ "readonly_root_filesystem",
+ "privileged",
+ "ulimits",
+ "user",
+ )
def get_base_params():
- return 'job_definition_name', 'type', 'parameters'
+ return "job_definition_name", "type", "parameters"
def get_compute_environment_order_list(module):
compute_environment_order_list = []
- for ceo in module.params['compute_environment_order']:
- compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment']))
+ for ceo in module.params["compute_environment_order"]:
+ compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"]))
return compute_environment_order_list
@@ -340,10 +353,10 @@ def remove_job_definition(module, batch_client):
try:
if not module.check_mode:
- batch_client.deregister_job_definition(jobDefinition=module.params['job_definition_arn'])
+ batch_client.deregister_job_definition(jobDefinition=module.params["job_definition_arn"])
changed = True
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Error removing job definition')
+ module.fail_json_aws(e, msg="Error removing job definition")
return changed
@@ -356,12 +369,12 @@ def job_definition_equal(module, current_definition):
break
for param in get_container_property_params():
- if module.params.get(param) != current_definition.get('containerProperties').get(cc(param)):
+ if module.params.get(param) != current_definition.get("containerProperties").get(cc(param)):
equal = False
break
for param in get_retry_strategy_params():
- if module.params.get(param) != current_definition.get('retryStrategy').get(cc(param)):
+ if module.params.get(param) != current_definition.get("retryStrategy").get(cc(param)):
equal = False
break
@@ -370,10 +383,10 @@ def job_definition_equal(module, current_definition):
def manage_state(module, batch_client):
changed = False
- current_state = 'absent'
- state = module.params['state']
- job_definition_name = module.params['job_definition_name']
- action_taken = 'none'
+ current_state = "absent"
+ state = module.params["state"]
+ job_definition_name = module.params["job_definition_name"]
+ action_taken = "none"
response = None
check_mode = module.check_mode
@@ -381,28 +394,28 @@ def manage_state(module, batch_client):
# check if the job definition exists
current_job_definition = get_current_job_definition(module, batch_client)
if current_job_definition:
- current_state = 'present'
+ current_state = "present"
- if state == 'present':
- if current_state == 'present':
+ if state == "present":
+ if current_state == "present":
# check if definition has changed and register a new version if necessary
if not job_definition_equal(module, current_job_definition):
create_job_definition(module, batch_client)
- action_taken = 'updated with new version'
+ action_taken = "updated with new version"
changed = True
else:
# Create Job definition
changed = create_job_definition(module, batch_client)
- action_taken = 'added'
+ action_taken = "added"
response = get_current_job_definition(module, batch_client)
if not response:
- module.fail_json(msg='Unable to get job definition information after creating/updating')
+ module.fail_json(msg="Unable to get job definition information after creating/updating")
else:
- if current_state == 'present':
+ if current_state == "present":
# remove the Job definition
changed = remove_job_definition(module, batch_client)
- action_taken = 'deregistered'
+ action_taken = "deregistered"
return dict(changed=changed, batch_job_definition_action=action_taken, response=response)
@@ -412,6 +425,7 @@ def manage_state(module, batch_client):
#
# ---------------------------------------------------------------------------------------------------
+
def main():
"""
Main entry point.
@@ -420,32 +434,29 @@ def main():
"""
argument_spec = dict(
- state=dict(required=False, default='present', choices=['present', 'absent']),
+ state=dict(required=False, default="present", choices=["present", "absent"]),
job_definition_name=dict(required=True),
job_definition_arn=dict(),
type=dict(required=True),
- parameters=dict(type='dict'),
+ parameters=dict(type="dict"),
image=dict(required=True),
- vcpus=dict(type='int', required=True),
- memory=dict(type='int', required=True),
- command=dict(type='list', default=[], elements='str'),
+ vcpus=dict(type="int", required=True),
+ memory=dict(type="int", required=True),
+ command=dict(type="list", default=[], elements="str"),
job_role_arn=dict(),
- volumes=dict(type='list', default=[], elements='dict'),
- environment=dict(type='list', default=[], elements='dict'),
- mount_points=dict(type='list', default=[], elements='dict'),
+ volumes=dict(type="list", default=[], elements="dict"),
+ environment=dict(type="list", default=[], elements="dict"),
+ mount_points=dict(type="list", default=[], elements="dict"),
readonly_root_filesystem=dict(),
privileged=dict(),
- ulimits=dict(type='list', default=[], elements='dict'),
+ ulimits=dict(type="list", default=[], elements="dict"),
user=dict(),
- attempts=dict(type='int')
+ attempts=dict(type="int"),
)
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- batch_client = module.client('batch')
+ batch_client = module.client("batch")
validate_params(module, batch_client)
@@ -454,5 +465,5 @@ def main():
module.exit_json(**camel_dict_to_snake_dict(results))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
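The get_current_job_definition() reflow above keeps its behaviour: Batch returns every registered revision of a definition, and the module compares against the newest one. An equivalent, more direct sketch:

def latest_job_definition(definitions):
    # Batch keeps every registered revision; pick the one with the highest
    # revision number, or None when the definition does not exist yet.
    if not definitions:
        return None
    return max(definitions, key=lambda d: d["revision"])

print(latest_job_definition([
    {"jobDefinitionName": "demo", "revision": 1, "status": "INACTIVE"},
    {"jobDefinitionName": "demo", "revision": 3, "status": "ACTIVE"},
    {"jobDefinitionName": "demo", "revision": 2, "status": "INACTIVE"},
]))
# -> the revision 3 entry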
diff --git a/ansible_collections/community/aws/plugins/modules/batch_job_queue.py b/ansible_collections/community/aws/plugins/modules/batch_job_queue.py
index ef48896a4..4be42cbc5 100644
--- a/ansible_collections/community/aws/plugins/modules/batch_job_queue.py
+++ b/ansible_collections/community/aws/plugins/modules/batch_job_queue.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: batch_job_queue
version_added: 1.0.0
@@ -63,12 +61,12 @@ options:
type: str
description: The name of the compute environment.
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: My Batch Job Queue
community.aws.batch_job_queue:
job_queue_name: jobQueueName
@@ -77,18 +75,18 @@ EXAMPLES = '''
job_queue_state: ENABLED
priority: 1
compute_environment_order:
- - order: 1
- compute_environment: my_compute_env1
- - order: 2
- compute_environment: my_compute_env2
+ - order: 1
+ compute_environment: my_compute_env1
+ - order: 2
+ compute_environment: my_compute_env2
register: batch_job_queue_action
- name: show results
ansible.builtin.debug:
var: batch_job_queue_action
-'''
+"""
-RETURN = r'''
+RETURN = r"""
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
@@ -104,17 +102,20 @@ output:
status: UPDATING
status_reason: "JobQueue Healthy"
type: dict
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+"""
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.batch import set_api_params
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
@@ -137,50 +138,49 @@ def validate_params(module):
#
# ---------------------------------------------------------------------------------------------------
+
def get_current_job_queue(module, client):
try:
- environments = client.describe_job_queues(
- jobQueues=[module.params['job_queue_name']]
- )
- return environments['jobQueues'][0] if len(environments['jobQueues']) > 0 else None
+ environments = client.describe_job_queues(jobQueues=[module.params["job_queue_name"]])
+ return environments["jobQueues"][0] if len(environments["jobQueues"]) > 0 else None
except ClientError:
return None
def create_job_queue(module, client):
"""
- Adds a Batch job queue
+ Adds a Batch job queue
- :param module:
- :param client:
- :return:
- """
+ :param module:
+ :param client:
+ :return:
+ """
changed = False
# set API parameters
- params = ('job_queue_name', 'priority')
+ params = ("job_queue_name", "priority")
api_params = set_api_params(module, params)
- if module.params['job_queue_state'] is not None:
- api_params['state'] = module.params['job_queue_state']
+ if module.params["job_queue_state"] is not None:
+ api_params["state"] = module.params["job_queue_state"]
- api_params['computeEnvironmentOrder'] = get_compute_environment_order_list(module)
+ api_params["computeEnvironmentOrder"] = get_compute_environment_order_list(module)
try:
if not module.check_mode:
client.create_job_queue(**api_params)
changed = True
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Error creating compute environment')
+ module.fail_json_aws(e, msg="Error creating compute environment")
return changed
def get_compute_environment_order_list(module):
compute_environment_order_list = []
- for ceo in module.params['compute_environment_order']:
- compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment']))
+ for ceo in module.params["compute_environment_order"]:
+ compute_environment_order_list.append(dict(order=ceo["order"], computeEnvironment=ceo["compute_environment"]))
return compute_environment_order_list
@@ -196,25 +196,25 @@ def remove_job_queue(module, client):
changed = False
# set API parameters
- api_params = {'jobQueue': module.params['job_queue_name']}
+ api_params = {"jobQueue": module.params["job_queue_name"]}
try:
if not module.check_mode:
client.delete_job_queue(**api_params)
changed = True
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Error removing job queue')
+ module.fail_json_aws(e, msg="Error removing job queue")
return changed
def manage_state(module, client):
changed = False
- current_state = 'absent'
- state = module.params['state']
- job_queue_state = module.params['job_queue_state']
- job_queue_name = module.params['job_queue_name']
- priority = module.params['priority']
- action_taken = 'none'
+ current_state = "absent"
+ state = module.params["state"]
+ job_queue_state = module.params["job_queue_state"]
+ job_queue_name = module.params["job_queue_name"]
+ priority = module.params["priority"]
+ action_taken = "none"
response = None
check_mode = module.check_mode
@@ -222,25 +222,25 @@ def manage_state(module, client):
# check if the job queue exists
current_job_queue = get_current_job_queue(module, client)
if current_job_queue:
- current_state = 'present'
+ current_state = "present"
- if state == 'present':
- if current_state == 'present':
+ if state == "present":
+ if current_state == "present":
updates = False
# Update Batch Job Queue configuration
- job_kwargs = {'jobQueue': job_queue_name}
+ job_kwargs = {"jobQueue": job_queue_name}
# Update configuration if needed
- if job_queue_state and current_job_queue['state'] != job_queue_state:
- job_kwargs.update({'state': job_queue_state})
+ if job_queue_state and current_job_queue["state"] != job_queue_state:
+ job_kwargs.update({"state": job_queue_state})
updates = True
- if priority is not None and current_job_queue['priority'] != priority:
- job_kwargs.update({'priority': priority})
+ if priority is not None and current_job_queue["priority"] != priority:
+ job_kwargs.update({"priority": priority})
updates = True
new_compute_environment_order_list = get_compute_environment_order_list(module)
- if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']:
- job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list
+ if new_compute_environment_order_list != current_job_queue["computeEnvironmentOrder"]:
+ job_kwargs["computeEnvironmentOrder"] = new_compute_environment_order_list
updates = True
if updates:
@@ -255,17 +255,17 @@ def manage_state(module, client):
else:
# Create Job Queue
changed = create_job_queue(module, client)
- action_taken = 'added'
+ action_taken = "added"
# Describe job queue
response = get_current_job_queue(module, client)
if not response:
- module.fail_json(msg='Unable to get job queue information after creating/updating')
+ module.fail_json(msg="Unable to get job queue information after creating/updating")
else:
- if current_state == 'present':
+ if current_state == "present":
# remove the Job Queue
changed = remove_job_queue(module, client)
- action_taken = 'deleted'
+ action_taken = "deleted"
return dict(changed=changed, batch_job_queue_action=action_taken, response=response)
@@ -275,6 +275,7 @@ def manage_state(module, client):
#
# ---------------------------------------------------------------------------------------------------
+
def main():
"""
Main entry point.
@@ -283,19 +284,16 @@ def main():
"""
argument_spec = dict(
- state=dict(required=False, default='present', choices=['present', 'absent']),
+ state=dict(required=False, default="present", choices=["present", "absent"]),
job_queue_name=dict(required=True),
- job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
- priority=dict(type='int', required=True),
- compute_environment_order=dict(type='list', required=True, elements='dict'),
+ job_queue_state=dict(required=False, default="ENABLED", choices=["ENABLED", "DISABLED"]),
+ priority=dict(type="int", required=True),
+ compute_environment_order=dict(type="list", required=True, elements="dict"),
)
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- client = module.client('batch')
+ client = module.client("batch")
validate_params(module)
@@ -304,5 +302,5 @@ def main():
module.exit_json(**camel_dict_to_snake_dict(results))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
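A sketch of the diffing pattern manage_state() applies above: build the updateJobQueue kwargs only from fields that differ from the live queue, so an unchanged queue produces a single identifying key and no update call (helper name hypothetical):

def queue_update_kwargs(current, desired_state, desired_priority, desired_order):
    # Start from the identifying key; add a field only when it differs from the
    # live queue. len(kwargs) == 1 means nothing to update.
    kwargs = {"jobQueue": current["jobQueueName"]}
    if desired_state and current["state"] != desired_state:
        kwargs["state"] = desired_state
    if desired_priority is not None and current["priority"] != desired_priority:
        kwargs["priority"] = desired_priority
    if desired_order != current["computeEnvironmentOrder"]:
        kwargs["computeEnvironmentOrder"] = desired_order
    return kwargs

current = {
    "jobQueueName": "jobQueueName",
    "state": "ENABLED",
    "priority": 1,
    "computeEnvironmentOrder": [{"order": 1, "computeEnvironment": "my_compute_env1"}],
}
print(queue_update_kwargs(current, "ENABLED", 2, current["computeEnvironmentOrder"]))
# {'jobQueue': 'jobQueueName', 'priority': 2}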
diff --git a/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py b/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py
index f7e71e2f8..ff32b2124 100644
--- a/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py
+++ b/ansible_collections/community/aws/plugins/modules/cloudformation_exports_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: cloudformation_exports_info
short_description: Read a value from CloudFormation Exports
version_added: 1.0.0
@@ -15,63 +13,60 @@ description:
author:
- "Michael Moyle (@mmoyle)"
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Get Exports
- community.aws.cloudformation_exports_info:
- profile: 'my_aws_profile'
- region: 'my_region'
+ community.aws.cloudformation_exports_info: {}
register: cf_exports
- ansible.builtin.debug:
msg: "{{ cf_exports }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
export_items:
description: A dictionary of Exports items names and values.
returned: Always
type: dict
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+"""
try:
- from botocore.exceptions import ClientError
from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # handled by AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
@AWSRetry.exponential_backoff()
def list_exports(cloudformation_client):
- '''Get Exports Names and Values and return in dictionary '''
- list_exports_paginator = cloudformation_client.get_paginator('list_exports')
- exports = list_exports_paginator.paginate().build_full_result()['Exports']
+ """Get Exports Names and Values and return in dictionary"""
+ list_exports_paginator = cloudformation_client.get_paginator("list_exports")
+ exports = list_exports_paginator.paginate().build_full_result()["Exports"]
export_items = dict()
for item in exports:
- export_items[item['Name']] = item['Value']
+ export_items[item["Name"]] = item["Value"]
return export_items
def main():
argument_spec = dict()
- result = dict(
- changed=False,
- original_message=''
- )
+ result = dict(changed=False, original_message="")
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- cloudformation_client = module.client('cloudformation')
+ cloudformation_client = module.client("cloudformation")
try:
- result['export_items'] = list_exports(cloudformation_client)
+ result["export_items"] = list_exports(cloudformation_client)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e)
@@ -80,5 +75,5 @@ def main():
module.exit_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
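list_exports() above relies on the paginator's build_full_result() to merge pages; flattening the merged Exports list into a name/value dict amounts to this sketch (hypothetical helper, shown over raw pages for clarity):

def exports_to_dict(pages):
    # Flatten paginated ListExports responses into {Name: Value}; the module gets
    # the same merged shape via the paginator's build_full_result().
    items = {}
    for page in pages:
        for export in page.get("Exports", []):
            items[export["Name"]] = export["Value"]
    return items

print(exports_to_dict([
    {"Exports": [{"Name": "VpcId", "Value": "vpc-0123"}]},
    {"Exports": [{"Name": "PrivateSubnetId", "Value": "subnet-4567"}]},
]))
# {'VpcId': 'vpc-0123', 'PrivateSubnetId': 'subnet-4567'}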
diff --git a/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py b/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py
index c6771db5e..ebb9403e8 100644
--- a/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py
+++ b/ansible_collections/community/aws/plugins/modules/cloudformation_stack_set.py
@@ -1,20 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: cloudformation_stack_set
version_added: 1.0.0
short_description: Manage groups of CloudFormation stacks
description:
- - Launches/updates/deletes AWS CloudFormation Stack Sets.
+ - Launches/updates/deletes AWS CloudFormation Stack Sets.
notes:
- - To make an individual stack, you want the M(amazon.aws.cloudformation) module.
+ - To make an individual stack, you want the M(amazon.aws.cloudformation) module.
options:
name:
description:
@@ -169,14 +167,15 @@ options:
- Note that this setting lets you specify the maximum for operations.
For large deployments, under certain circumstances the actual count may be lower.
-author: "Ryan Scott Brown (@ryansb)"
+author:
+ - "Ryan Scott Brown (@ryansb)"
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create a stack set with instances in two accounts
community.aws.cloudformation_stack_set:
name: my-stack
@@ -202,7 +201,7 @@ EXAMPLES = r'''
- 123456789012
- 234567890123
regions:
- - us-east-1
+ - us-east-1
- name: The same type of update, but wait for the update to complete in all stacks
community.aws.cloudformation_stack_set:
@@ -218,7 +217,7 @@ EXAMPLES = r'''
- 123456789012
- 234567890123
regions:
- - us-east-1
+ - us-east-1
- name: Register new accounts (create new stack instances) with an existing stack set.
community.aws.cloudformation_stack_set:
@@ -235,10 +234,10 @@ EXAMPLES = r'''
- 234567890123
- 345678901234
regions:
- - us-east-1
-'''
+ - us-east-1
+"""
-RETURN = r'''
+RETURN = r"""
operations_log:
type: list
description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
@@ -316,8 +315,7 @@ stack_set:
other:
Type: "AWS::SNS::Topic"
Properties: {}
-
-''' # NOQA
+"""
import datetime
import itertools
@@ -325,7 +323,8 @@ import time
import uuid
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
# handled by AnsibleAWSModule
pass
@@ -333,19 +332,20 @@ except ImportError:
from ansible.module_utils._text import to_native
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def create_stack_set(module, stack_params, cfn):
try:
cfn.create_stack_set(aws_retry=True, **stack_params)
- return await_stack_set_exists(cfn, stack_params['StackSetName'])
+ return await_stack_set_exists(cfn, stack_params["StackSetName"])
except (ClientError, BotoCoreError) as err:
- module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get('StackSetName')))
+ module.fail_json_aws(err, msg=f"Failed to create stack set {stack_params.get('StackSetName')}.")
def update_stack_set(module, stack_params, cfn):
@@ -354,22 +354,34 @@ def update_stack_set(module, stack_params, cfn):
# don't need to be updated.
try:
cfn.update_stack_set(**stack_params)
- except is_boto3_error_code('StackSetNotFound') as err: # pylint: disable=duplicate-except
+ except is_boto3_error_code("StackSetNotFound") as err: # pylint: disable=duplicate-except
module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.")
- except is_boto3_error_code('StackInstanceNotFound') as err: # pylint: disable=duplicate-except
- module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check "
- "the `accounts` and `regions` parameters.")
- except is_boto3_error_code('OperationInProgressException') as err: # pylint: disable=duplicate-except
+ except is_boto3_error_code("StackInstanceNotFound") as err: # pylint: disable=duplicate-except
module.fail_json_aws(
- err, msg="Another operation is already in progress on this stack set - please try again later. When making "
- "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.")
+ err,
+ msg=(
+ "One or more stack instances were not found for this stack set. Double check "
+ "the `accounts` and `regions` parameters."
+ ),
+ )
+ except is_boto3_error_code("OperationInProgressException") as err: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ err,
+ msg=(
+ "Another operation is already in progress on this stack set - please try again later. When making"
+ " multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op"
+ " errors."
+ ),
+ )
except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except
module.fail_json_aws(err, msg="Could not update stack set.")
- if module.params.get('wait'):
+ if module.params.get("wait"):
await_stack_set_operation(
- module, cfn, operation_id=stack_params['OperationId'],
- stack_set_name=stack_params['StackSetName'],
- max_wait=module.params.get('wait_timeout'),
+ module,
+ cfn,
+ operation_id=stack_params["OperationId"],
+ stack_set_name=stack_params["StackSetName"],
+ max_wait=module.params.get("wait_timeout"),
)
return True
@@ -379,20 +391,24 @@ def compare_stack_instances(cfn, stack_set_name, accounts, regions):
instance_list = cfn.list_stack_instances(
aws_retry=True,
StackSetName=stack_set_name,
- )['Summaries']
+ )["Summaries"]
desired_stack_instances = set(itertools.product(accounts, regions))
- existing_stack_instances = set((i['Account'], i['Region']) for i in instance_list)
+ existing_stack_instances = set((i["Account"], i["Region"]) for i in instance_list)
# new stacks, existing stacks, unspecified stacks
- return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances)
+ return (
+ (desired_stack_instances - existing_stack_instances),
+ existing_stack_instances,
+ (existing_stack_instances - desired_stack_instances),
+ )
@AWSRetry.jittered_backoff(retries=3, delay=4)
def stack_set_facts(cfn, stack_set_name):
try:
- ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet']
- ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags'])
+ ss = cfn.describe_stack_set(StackSetName=stack_set_name)["StackSet"]
+ ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"])
return ss
- except cfn.exceptions.from_code('StackSetNotFound'):
+ except cfn.exceptions.from_code("StackSetNotFound"):
# Return None if the stack doesn't exist
return
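compare_stack_instances treats the desired state as the Cartesian product of accounts and regions, so two accounts crossed with two regions imply four stack instances. An illustrative sketch (account IDs and template path are placeholders):

- name: two accounts x two regions -> four stack instances
  community.aws.cloudformation_stack_set:
    name: my-stack-set                  # placeholder name
    state: present
    template: files/template.yml        # placeholder path
    accounts:
      - "111111111111"                  # placeholder account IDs
      - "222222222222"
    regions:
      - us-east-1
      - eu-west-1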
@@ -403,29 +419,29 @@ def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wai
for i in range(max_wait // 15):
try:
operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id)
- if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'):
+ if operation["StackSetOperation"]["Status"] not in ("RUNNING", "STOPPING"):
# Stack set has completed operation
break
- except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except
+ except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except
pass
- except is_boto3_error_code('OperationNotFound'): # pylint: disable=duplicate-except
+ except is_boto3_error_code("OperationNotFound"): # pylint: disable=duplicate-except
pass
time.sleep(15)
- if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'):
+ if operation and operation["StackSetOperation"]["Status"] not in ("FAILED", "STOPPED"):
await_stack_instance_completion(
- module, cfn,
+ module,
+ cfn,
stack_set_name=stack_set_name,
# subtract however long we waited already
max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()),
)
- elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'):
+ elif operation and operation["StackSetOperation"]["Status"] in ("FAILED", "STOPPED"):
pass
else:
module.warn(
- "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format(
- operation_id, stack_set_name, max_wait
- )
+ f"Timed out waiting for operation {operation_id} on stack set {stack_set_name} after {max_wait} seconds."
+ " Returning unfinished operation"
)
@@ -434,84 +450,83 @@ def await_stack_instance_completion(module, cfn, stack_set_name, max_wait):
for i in range(max_wait // 15):
try:
stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name)
- to_await = [inst for inst in stack_instances['Summaries']
- if inst['Status'] != 'CURRENT']
+ to_await = [inst for inst in stack_instances["Summaries"] if inst["Status"] != "CURRENT"]
if not to_await:
- return stack_instances['Summaries']
- except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except
+ return stack_instances["Summaries"]
+ except is_boto3_error_code("StackSetNotFound"): # pylint: disable=duplicate-except
# this means the deletion beat us, or the stack set is not yet propagated
pass
time.sleep(15)
module.warn(
- "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format(
- stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait
- )
+ f"Timed out waiting for stack set {stack_set_name} instances {', '.join(s['StackId'] for s in to_await)} to"
+ f" complete after {max_wait} seconds. Returning unfinished operation"
)
def await_stack_set_exists(cfn, stack_set_name):
# AWSRetry will retry on `StackSetNotFound` errors for us
- ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)['StackSet']
- ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags'])
- return camel_dict_to_snake_dict(ss, ignore_list=('Tags',))
+ ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)["StackSet"]
+ ss["Tags"] = boto3_tag_list_to_ansible_dict(ss["Tags"])
+ return camel_dict_to_snake_dict(ss, ignore_list=("Tags",))
def describe_stack_tree(module, stack_set_name, operation_ids=None):
- jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound'])
- cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
+ jittered_backoff_decorator = AWSRetry.jittered_backoff(
+ retries=5, delay=3, max_delay=5, catch_extra_error_codes=["StackSetNotFound"]
+ )
+ cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator)
result = dict()
- result['stack_set'] = camel_dict_to_snake_dict(
+ result["stack_set"] = camel_dict_to_snake_dict(
cfn.describe_stack_set(
StackSetName=stack_set_name,
aws_retry=True,
- )['StackSet']
+ )["StackSet"]
)
- result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags'])
- result['operations_log'] = sorted(
+ result["stack_set"]["tags"] = boto3_tag_list_to_ansible_dict(result["stack_set"]["tags"])
+ result["operations_log"] = sorted(
camel_dict_to_snake_dict(
cfn.list_stack_set_operations(
StackSetName=stack_set_name,
aws_retry=True,
)
- )['summaries'],
- key=lambda x: x['creation_timestamp']
+ )["summaries"],
+ key=lambda x: x["creation_timestamp"],
)
- result['stack_instances'] = sorted(
- [
- camel_dict_to_snake_dict(i) for i in
- cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries']
- ],
- key=lambda i: i['region'] + i['account']
+ result["stack_instances"] = sorted(
+ [camel_dict_to_snake_dict(i) for i in cfn.list_stack_instances(StackSetName=stack_set_name)["Summaries"]],
+ key=lambda i: i["region"] + i["account"],
)
if operation_ids:
- result['operations'] = []
+ result["operations"] = []
for op_id in operation_ids:
try:
- result['operations'].append(camel_dict_to_snake_dict(
- cfn.describe_stack_set_operation(
- StackSetName=stack_set_name,
- OperationId=op_id,
- )['StackSetOperation']
- ))
- except is_boto3_error_code('OperationNotFoundException'): # pylint: disable=duplicate-except
+ result["operations"].append(
+ camel_dict_to_snake_dict(
+ cfn.describe_stack_set_operation(
+ StackSetName=stack_set_name,
+ OperationId=op_id,
+ )["StackSetOperation"]
+ )
+ )
+ except is_boto3_error_code("OperationNotFoundException"): # pylint: disable=duplicate-except
pass
return result
def get_operation_preferences(module):
params = dict()
- if module.params.get('regions'):
- params['RegionOrder'] = list(module.params['regions'])
+ if module.params.get("regions"):
+ params["RegionOrder"] = list(module.params["regions"])
for param, api_name in {
- 'fail_count': 'FailureToleranceCount',
- 'fail_percentage': 'FailureTolerancePercentage',
- 'parallel_percentage': 'MaxConcurrentPercentage',
- 'parallel_count': 'MaxConcurrentCount',
+ "fail_count": "FailureToleranceCount",
+ "fail_percentage": "FailureTolerancePercentage",
+ "parallel_percentage": "MaxConcurrentPercentage",
+ "parallel_count": "MaxConcurrentCount",
}.items():
- if module.params.get('failure_tolerance', {}).get(param):
- params[api_name] = module.params.get('failure_tolerance', {}).get(param)
+ if module.params.get("failure_tolerance", {}).get(param):
+ params[api_name] = module.params.get("failure_tolerance", {}).get(param)
return params
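get_operation_preferences maps the failure_tolerance suboptions onto the OperationPreferences keys of the CloudFormation API; per the argument spec below, fail_count/fail_percentage and parallel_count/parallel_percentage are mutually exclusive pairs. A sketch of one valid combination (all values illustrative):

- name: bounded roll-out of a stack set update
  community.aws.cloudformation_stack_set:
    name: my-stack-set                  # placeholder name
    state: present
    template: files/template.yml        # placeholder path
    accounts: ["111111111111", "222222222222"]
    regions: ["us-east-1", "eu-west-1"]
    failure_tolerance:
      fail_count: 1                     # sent as FailureToleranceCount
      parallel_count: 2                 # sent as MaxConcurrentCount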
@@ -519,148 +534,154 @@ def main():
argument_spec = dict(
name=dict(required=True),
description=dict(),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=900),
- state=dict(default='present', choices=['present', 'absent']),
- purge_stacks=dict(type='bool', default=True),
- parameters=dict(type='dict', default={}),
- template=dict(type='path'),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=900),
+ state=dict(default="present", choices=["present", "absent"]),
+ purge_stacks=dict(type="bool", default=True),
+ parameters=dict(type="dict", default={}),
+ template=dict(type="path"),
template_url=dict(),
template_body=dict(),
- capabilities=dict(type='list', elements='str', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
- regions=dict(type='list', elements='str'),
- accounts=dict(type='list', elements='str'),
+ capabilities=dict(type="list", elements="str", choices=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"]),
+ regions=dict(type="list", elements="str"),
+ accounts=dict(type="list", elements="str"),
failure_tolerance=dict(
- type='dict',
+ type="dict",
default={},
options=dict(
- fail_count=dict(type='int'),
- fail_percentage=dict(type='int'),
- parallel_percentage=dict(type='int'),
- parallel_count=dict(type='int'),
+ fail_count=dict(type="int"),
+ fail_percentage=dict(type="int"),
+ parallel_percentage=dict(type="int"),
+ parallel_count=dict(type="int"),
),
mutually_exclusive=[
- ['fail_count', 'fail_percentage'],
- ['parallel_count', 'parallel_percentage'],
+ ["fail_count", "fail_percentage"],
+ ["parallel_count", "parallel_percentage"],
],
),
- administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']),
- execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']),
- tags=dict(type='dict'),
+ administration_role_arn=dict(aliases=["admin_role_arn", "administration_role", "admin_role"]),
+ execution_role_name=dict(aliases=["execution_role", "exec_role", "exec_role_name"]),
+ tags=dict(type="dict"),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- mutually_exclusive=[['template_url', 'template', 'template_body']],
- supports_check_mode=True
+ mutually_exclusive=[["template_url", "template", "template_body"]],
+ supports_check_mode=True,
)
# Wrap the cloudformation client methods that this module uses with
# automatic backoff / retry for throttling error codes
- jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound'])
- cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
- existing_stack_set = stack_set_facts(cfn, module.params['name'])
+ jittered_backoff_decorator = AWSRetry.jittered_backoff(
+ retries=10, delay=3, max_delay=30, catch_extra_error_codes=["StackSetNotFound"]
+ )
+ cfn = module.client("cloudformation", retry_decorator=jittered_backoff_decorator)
+ existing_stack_set = stack_set_facts(cfn, module.params["name"])
operation_uuid = to_native(uuid.uuid4())
operation_ids = []
# collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
stack_params = {}
- state = module.params['state']
- if state == 'present' and not module.params['accounts']:
+ state = module.params["state"]
+ if state == "present" and not module.params["accounts"]:
module.fail_json(
- msg="Can't create a stack set without choosing at least one account. "
+ msg=(
+ "Can't create a stack set without choosing at least one account. "
"To get the ID of the current account, use the aws_caller_info module."
+ )
)
- module.params['accounts'] = [to_native(a) for a in module.params['accounts']]
+ module.params["accounts"] = [to_native(a) for a in module.params["accounts"]]
- stack_params['StackSetName'] = module.params['name']
- if module.params.get('description'):
- stack_params['Description'] = module.params['description']
+ stack_params["StackSetName"] = module.params["name"]
+ if module.params.get("description"):
+ stack_params["Description"] = module.params["description"]
- if module.params.get('capabilities'):
- stack_params['Capabilities'] = module.params['capabilities']
+ if module.params.get("capabilities"):
+ stack_params["Capabilities"] = module.params["capabilities"]
- if module.params['template'] is not None:
- with open(module.params['template'], 'r') as tpl:
- stack_params['TemplateBody'] = tpl.read()
- elif module.params['template_body'] is not None:
- stack_params['TemplateBody'] = module.params['template_body']
- elif module.params['template_url'] is not None:
- stack_params['TemplateURL'] = module.params['template_url']
+ if module.params["template"] is not None:
+ with open(module.params["template"], "r") as tpl:
+ stack_params["TemplateBody"] = tpl.read()
+ elif module.params["template_body"] is not None:
+ stack_params["TemplateBody"] = module.params["template_body"]
+ elif module.params["template_url"] is not None:
+ stack_params["TemplateURL"] = module.params["template_url"]
else:
# no template is provided, but if the stack set exists already, we can use the existing one.
if existing_stack_set:
- stack_params['UsePreviousTemplate'] = True
+ stack_params["UsePreviousTemplate"] = True
else:
module.fail_json(
- msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
- "`template_body`, or `template_url`".format(module.params['name'])
+ msg=(
+ f"The Stack Set {module.params['name']} does not exist, and no template was provided. Provide one"
+ " of `template`, `template_body`, or `template_url`"
+ )
)
- stack_params['Parameters'] = []
- for k, v in module.params['parameters'].items():
+ stack_params["Parameters"] = []
+ for k, v in module.params["parameters"].items():
if isinstance(v, dict):
# set parameter based on a dict to allow additional CFN Parameter Attributes
param = dict(ParameterKey=k)
- if 'value' in v:
- param['ParameterValue'] = to_native(v['value'])
+ if "value" in v:
+ param["ParameterValue"] = to_native(v["value"])
- if 'use_previous_value' in v and bool(v['use_previous_value']):
- param['UsePreviousValue'] = True
- param.pop('ParameterValue', None)
+ if "use_previous_value" in v and bool(v["use_previous_value"]):
+ param["UsePreviousValue"] = True
+ param.pop("ParameterValue", None)
- stack_params['Parameters'].append(param)
+ stack_params["Parameters"].append(param)
else:
# allow default k/v configuration to set a template parameter
- stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
+ stack_params["Parameters"].append({"ParameterKey": k, "ParameterValue": str(v)})
- if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
- stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
+ if module.params.get("tags") and isinstance(module.params.get("tags"), dict):
+ stack_params["Tags"] = ansible_dict_to_boto3_tag_list(module.params["tags"])
- if module.params.get('administration_role_arn'):
+ if module.params.get("administration_role_arn"):
# TODO loosen the semantics here to autodetect the account ID and build the ARN
- stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
- if module.params.get('execution_role_name'):
- stack_params['ExecutionRoleName'] = module.params['execution_role_name']
+ stack_params["AdministrationRoleARN"] = module.params["administration_role_arn"]
+ if module.params.get("execution_role_name"):
+ stack_params["ExecutionRoleName"] = module.params["execution_role_name"]
result = {}
if module.check_mode:
- if state == 'absent' and existing_stack_set:
- module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
- elif state == 'absent' and not existing_stack_set:
- module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
- elif state == 'present' and not existing_stack_set:
- module.exit_json(changed=True, msg='New stack set would be created', meta=[])
- elif state == 'present' and existing_stack_set:
+ if state == "absent" and existing_stack_set:
+ module.exit_json(changed=True, msg="Stack set would be deleted", meta=[])
+ elif state == "absent" and not existing_stack_set:
+ module.exit_json(changed=False, msg="Stack set doesn't exist", meta=[])
+ elif state == "present" and not existing_stack_set:
+ module.exit_json(changed=True, msg="New stack set would be created", meta=[])
+ elif state == "present" and existing_stack_set:
new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
cfn,
- module.params['name'],
- module.params['accounts'],
- module.params['regions'],
+ module.params["name"],
+ module.params["accounts"],
+ module.params["regions"],
)
if new_stacks:
- module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
- elif unspecified_stacks and module.params.get('purge_stack_instances'):
- module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
+ module.exit_json(changed=True, msg="New stack instance(s) would be created", meta=[])
+ elif unspecified_stacks and module.params.get("purge_stacks"):
+ module.exit_json(changed=True, msg="Old stack instance(s) would be deleted", meta=[])
else:
# TODO: need to check the template and other settings for correct check mode
- module.exit_json(changed=False, msg='No changes detected', meta=[])
+ module.exit_json(changed=False, msg="No changes detected", meta=[])
changed = False
- if state == 'present':
+ if state == "present":
if not existing_stack_set:
# on create this parameter has a different name, and cannot be referenced later in the job log
- stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
+ stack_params["ClientRequestToken"] = f"Ansible-StackSet-Create-{operation_uuid}"
changed = True
create_stack_set(module, stack_params, cfn)
else:
- stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
- operation_ids.append(stack_params['OperationId'])
- if module.params.get('regions'):
- stack_params['OperationPreferences'] = get_operation_preferences(module)
+ stack_params["OperationId"] = f"Ansible-StackSet-Update-{operation_uuid}"
+ operation_ids.append(stack_params["OperationId"])
+ if module.params.get("regions"):
+ stack_params["OperationPreferences"] = get_operation_preferences(module)
changed |= update_stack_set(module, stack_params, cfn)
await_stack_set_operation(
@@ -674,24 +695,24 @@ def main():
# now create/update any appropriate stack instances
new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
cfn,
- module.params['name'],
- module.params['accounts'],
- module.params['regions'],
+ module.params["name"],
+ module.params["accounts"],
+ module.params["regions"],
)
if new_stack_instances:
- operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
+ operation_ids.append(f"Ansible-StackInstance-Create-{operation_uuid}")
changed = True
cfn.create_stack_instances(
- StackSetName=module.params['name'],
+ StackSetName=module.params["name"],
Accounts=list(set(acct for acct, region in new_stack_instances)),
Regions=list(set(region for acct, region in new_stack_instances)),
OperationPreferences=get_operation_preferences(module),
OperationId=operation_ids[-1],
)
else:
- operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
+ operation_ids.append(f"Ansible-StackInstance-Update-{operation_uuid}")
cfn.update_stack_instances(
- StackSetName=module.params['name'],
+ StackSetName=module.params["name"],
Accounts=list(set(acct for acct, region in existing_stack_instances)),
Regions=list(set(region for acct, region in existing_stack_instances)),
OperationPreferences=get_operation_preferences(module),
@@ -699,55 +720,67 @@ def main():
)
for op in operation_ids:
await_stack_set_operation(
- module, cfn, operation_id=op,
- stack_set_name=module.params['name'],
- max_wait=module.params.get('wait_timeout'),
+ module,
+ cfn,
+ operation_id=op,
+ stack_set_name=module.params["name"],
+ max_wait=module.params.get("wait_timeout"),
)
- elif state == 'absent':
+ elif state == "absent":
if not existing_stack_set:
- module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
- if module.params.get('purge_stack_instances') is False:
+ module.exit_json(msg=f"Stack set {module.params['name']} does not exist")
+ if module.params.get("purge_stacks") is False:
pass
try:
cfn.delete_stack_set(
- StackSetName=module.params['name'],
+ StackSetName=module.params["name"],
+ )
+ module.exit_json(msg=f"Stack set {module.params['name']} deleted")
+ except is_boto3_error_code("OperationInProgressException") as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e, msg=f"Cannot delete stack {module.params['name']} while there is an operation in progress"
)
- module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
- except is_boto3_error_code('OperationInProgressException') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
- except is_boto3_error_code('StackSetNotEmptyException'): # pylint: disable=duplicate-except
- delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
+ except is_boto3_error_code("StackSetNotEmptyException"): # pylint: disable=duplicate-except
+ delete_instances_op = f"Ansible-StackInstance-Delete-{operation_uuid}"
cfn.delete_stack_instances(
- StackSetName=module.params['name'],
- Accounts=module.params['accounts'],
- Regions=module.params['regions'],
- RetainStacks=(not module.params.get('purge_stacks')),
- OperationId=delete_instances_op
+ StackSetName=module.params["name"],
+ Accounts=module.params["accounts"],
+ Regions=module.params["regions"],
+ RetainStacks=(not module.params.get("purge_stacks")),
+ OperationId=delete_instances_op,
)
await_stack_set_operation(
- module, cfn, operation_id=delete_instances_op,
- stack_set_name=stack_params['StackSetName'],
- max_wait=module.params.get('wait_timeout'),
+ module,
+ cfn,
+ operation_id=delete_instances_op,
+ stack_set_name=stack_params["StackSetName"],
+ max_wait=module.params.get("wait_timeout"),
)
try:
cfn.delete_stack_set(
- StackSetName=module.params['name'],
+ StackSetName=module.params["name"],
)
- except is_boto3_error_code('StackSetNotEmptyException') as exc: # pylint: disable=duplicate-except
+ except is_boto3_error_code("StackSetNotEmptyException") as exc: # pylint: disable=duplicate-except
# this time, it is likely that either the delete failed or there are more stacks.
instances = cfn.list_stack_instances(
- StackSetName=module.params['name'],
+ StackSetName=module.params["name"],
+ )
+ stack_states = ", ".join(
+ "(account={Account}, region={Region}, state={Status})".format(**i) for i in instances["Summaries"]
+ )
+ module.fail_json_aws(
+ exc,
+ msg="Could not purge all stacks, or not all accounts/regions were chosen for deletion: "
+ + stack_states,
)
- stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries'])
- module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
- module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))
+ module.exit_json(changed=True, msg=f"Stack set {module.params['name']} deleted")
- result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
- if any(o['status'] == 'FAILED' for o in result['operations']):
+ result.update(**describe_stack_tree(module, stack_params["StackSetName"], operation_ids=operation_ids))
+ if any(o["status"] == "FAILED" for o in result["operations"]):
module.fail_json(msg="One or more operations failed to execute", **result)
module.exit_json(changed=changed, **result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
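For the absent branch above, the module first attempts delete_stack_set, falls back to delete_stack_instances on StackSetNotEmptyException (RetainStacks being the inverse of purge_stacks), then retries the delete. A sketch of the corresponding task (name and IDs are placeholders):

- name: delete a stack set and purge its stack instances
  community.aws.cloudformation_stack_set:
    name: my-stack-set                  # placeholder name
    state: absent
    purge_stacks: true                  # RetainStacks=false when deleting instances
    accounts: ["111111111111"]
    regions: ["us-east-1"]
    wait: true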
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py
index 447fd994e..13718cfb8 100644
--- a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
version_added: 1.0.0
@@ -21,12 +19,6 @@ author:
- Willem van Ketwich (@wilvk)
- Will Thames (@willthames)
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
- - amazon.aws.tags
-
options:
state:
@@ -119,6 +111,17 @@ options:
origin_path:
description: Tells CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin.
type: str
+ origin_shield:
+ description: Specify origin shield options for the origin.
+ type: dict
+ suboptions:
+ enabled:
+ description: Indicate whether you want the origin to have Origin Shield enabled or not.
+ type: bool
+ origin_shield_region:
+ description: Specify which AWS region will be used for Origin Shield. Required if Origin Shield is enabled.
+ type: str
+ version_added: 6.0.0
custom_headers:
description:
- Custom headers you wish to add to the request before passing it to the origin.
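A minimal sketch of an origins[] entry using the new origin_shield option documented above (domain, ID, and region are illustrative; unrelated required options are omitted for brevity):

- name: distribution origin with Origin Shield enabled
  community.aws.cloudfront_distribution:
    state: present
    origins:
      - id: 'example-origin'                  # illustrative ID
        domain_name: www.example.com
        origin_shield:
          enabled: true
          origin_shield_region: us-east-1     # required when enabled is true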
@@ -169,7 +172,18 @@ options:
origin_keepalive_timeout:
description: A keep-alive timeout (in seconds).
type: int
-
+ connection_attempts:
+ description: The number of times that CloudFront attempts to connect to the origin.
+ The minimum number is C(1) and the maximum is C(3).
+ type: int
+ default: 3
+ version_added: 6.0.0
+ connection_timeout:
+ description: The number of seconds that CloudFront waits when trying to establish a connection to the origin.
+ The minimum timeout is C(1) second and the maximum is C(10) seconds.
+ type: int
+ default: 10
+ version_added: 6.0.0
purge_origins:
description: Whether to remove any origins that aren't listed in I(origins).
default: false
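A sketch combining the new connection_attempts and connection_timeout options (values are illustrative and stay within the documented 1-3 and 1-10 ranges):

- name: origin with tighter connection retry and timeout settings
  community.aws.cloudfront_distribution:
    state: present
    origins:
      - id: 'example-origin'        # illustrative ID
        domain_name: www.example.com
        connection_attempts: 1      # default is 3
        connection_timeout: 5       # seconds, default is 10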
@@ -191,9 +205,25 @@ options:
description:
- The ID of the header policy that CloudFront adds to responses that it sends to viewers.
type: str
+ cache_policy_id:
+ version_added: 7.1.0
+ description:
+ - The ID of the cache policy for CloudFront to use for the default cache behavior.
+ - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html)
+ type: str
+ origin_request_policy_id:
+ version_added: 7.1.0
+ description:
+ - The ID of the origin request policy for CloudFront to use for the default cache behavior.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html)
+ type: str
forwarded_values:
description:
- A dict that specifies how CloudFront handles query strings and cookies.
+ - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option.
type: dict
suboptions:
query_string:
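Since a behavior may carry either cache_policy_id or forwarded_values but not both, a sketch of a default_cache_behavior driven by a cache policy (the policy ID is a placeholder, not taken from this patch):

- name: default cache behavior using a cache policy instead of forwarded_values
  community.aws.cloudfront_distribution:
    state: present
    default_cache_behavior:
      target_origin_id: 'example-origin'                     # illustrative ID
      cache_policy_id: 00000000-0000-0000-0000-000000000000  # placeholder policy ID
      # forwarded_values is omitted on purpose; the two options are exclusive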
@@ -312,9 +342,25 @@ options:
description:
- The ID of the header policy that CloudFront adds to responses that it sends to viewers.
type: str
+ cache_policy_id:
+ version_added: 7.1.0
+ description:
+ - The ID of the cache policy for CloudFront to use for the cache behavior.
+ - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html)
+ type: str
+ origin_request_policy_id:
+ version_added: 7.1.0
+ description:
+ - The ID of the origin request policy for CloudFront to use for the cache behavior.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html)
+ type: str
forwarded_values:
description:
- A dict that specifies how CloudFront handles query strings and cookies.
+ - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option.
type: dict
suboptions:
query_string:
@@ -556,7 +602,7 @@ options:
description:
- The version of the http protocol to use for the distribution.
- AWS defaults this to C(http2).
- - Valid values are C(http1.1) and C(http2).
+ - Valid values are C(http1.1), C(http2), C(http3) and C(http2and3).
type: str
ipv6_enabled:
@@ -577,9 +623,14 @@ options:
default: 1800
type: int
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: create a basic distribution with defaults and tags
community.aws.cloudfront_distribution:
state: present
@@ -606,7 +657,9 @@ EXAMPLES = r'''
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by cloudfront.py again
- aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ]
+ aliases:
+ - 'www.my-distribution-source.com'
+ - 'zzz.aaa.io'
- name: update a distribution's aliases and comment using an alias as a reference
community.aws.cloudfront_distribution:
@@ -633,12 +686,12 @@ EXAMPLES = r'''
state: present
caller_reference: unique test distribution ID
origins:
- - id: 'my test origin-000111'
- domain_name: www.example.com
- origin_path: /production
- custom_headers:
- - header_name: MyCustomHeaderName
- header_value: MyCustomHeaderValue
+ - id: 'my test origin-000111'
+ domain_name: www.example.com
+ origin_path: /production
+ custom_headers:
+ - header_name: MyCustomHeaderName
+ header_value: MyCustomHeaderValue
default_cache_behavior:
target_origin_id: 'my test origin-000111'
forwarded_values:
@@ -646,7 +699,7 @@ EXAMPLES = r'''
cookies:
forward: all
headers:
- - '*'
+ - '*'
viewer_protocol_policy: allow-all
smooth_streaming: true
compress: true
@@ -669,9 +722,9 @@ EXAMPLES = r'''
community.aws.cloudfront_distribution:
state: absent
caller_reference: replaceable distribution
-'''
+"""
-RETURN = r'''
+RETURN = r"""
active_trusted_signers:
description: Key pair IDs that CloudFront is aware of for each trusted signer.
returned: always
@@ -1278,6 +1331,32 @@ origins:
returned: always
type: str
sample: ''
+ connection_attempts:
+ description: The number of times that CloudFront attempts to connect to the origin.
+ returned: always
+ type: int
+ sample: 3
+ connection_timeout:
+ description: The number of seconds that CloudFront waits when trying to establish a connection to the origin.
+ returned: always
+ type: int
+ sample: 10
+ origin_shield:
+ description: Origin Shield configuration for this origin.
+ returned: always
+ type: complex
+ contains:
+ enabled:
+ description: Whether Origin Shield is enabled or not.
+ returned: always
+ type: bool
+ sample: false
+ origin_shield_region:
+ description: Which region is used by Origin Shield.
+ returned: when enabled is true
+ type: str
+ sample: us-east-1
+ version_added: 6.0.0
s3_origin_config:
description: Origin access identity configuration for S3 Origin.
returned: when s3_origin_access_identity_enabled is true
@@ -1368,29 +1447,31 @@ web_acl_id:
returned: always
type: str
sample: abcd1234-1234-abcd-abcd-abcd12345678
-'''
+"""
-from ansible.module_utils._text import to_text, to_native
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
-from ansible.module_utils.common.dict_transformations import recursive_diff
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
import datetime
-
-try:
- from collections import OrderedDict
-except ImportError:
- try:
- from ordereddict import OrderedDict
- except ImportError:
- pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed)
+import re
+from collections import OrderedDict
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import recursive_diff
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def change_dict_key_name(dictionary, old_key, new_key):
if old_key in dictionary:
@@ -1417,43 +1498,44 @@ def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True):
if list_items is None:
list_items = []
if not isinstance(list_items, list):
- raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items)))
+ raise ValueError(f"Expected a list, got a {type(list_items).__name__} with value {str(list_items)}")
result = {}
if include_quantity:
- result['quantity'] = len(list_items)
+ result["quantity"] = len(list_items)
if len(list_items) > 0:
- result['items'] = list_items
+ result["items"] = list_items
return result
def create_distribution(client, module, config, tags):
try:
if not tags:
- return client.create_distribution(aws_retry=True, DistributionConfig=config)['Distribution']
+ return client.create_distribution(aws_retry=True, DistributionConfig=config)["Distribution"]
else:
- distribution_config_with_tags = {
- 'DistributionConfig': config,
- 'Tags': {
- 'Items': tags
- }
- }
- return client.create_distribution_with_tags(aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags)['Distribution']
+ distribution_config_with_tags = {"DistributionConfig": config, "Tags": {"Items": tags}}
+ return client.create_distribution_with_tags(
+ aws_retry=True, DistributionConfigWithTags=distribution_config_with_tags
+ )["Distribution"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error creating distribution")
def delete_distribution(client, module, distribution):
try:
- return client.delete_distribution(aws_retry=True, Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag'])
+ return client.delete_distribution(
+ aws_retry=True, Id=distribution["Distribution"]["Id"], IfMatch=distribution["ETag"]
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution']))
+ module.fail_json_aws(e, msg=f"Error deleting distribution {to_native(distribution['Distribution'])}")
def update_distribution(client, module, config, distribution_id, e_tag):
try:
- return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution']
+ return client.update_distribution(aws_retry=True, DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)[
+ "Distribution"
+ ]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config))
+ module.fail_json_aws(e, msg=f"Error updating distribution to {to_native(config)}")
def tag_resource(client, module, arn, tags):
@@ -1473,7 +1555,7 @@ def untag_resource(client, module, arn, tag_keys):
def list_tags_for_resource(client, module, arn):
try:
response = client.list_tags_for_resource(aws_retry=True, Resource=arn)
- return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items'))
+ return boto3_tag_list_to_ansible_dict(response.get("Tags").get("Items"))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error listing tags for resource")
@@ -1505,103 +1587,131 @@ class CloudFrontValidationManager(object):
self.__default_https_port = 443
self.__default_ipv6_enabled = False
self.__default_origin_ssl_protocols = [
- 'TLSv1',
- 'TLSv1.1',
- 'TLSv1.2'
+ "TLSv1",
+ "TLSv1.1",
+ "TLSv1.2",
]
- self.__default_custom_origin_protocol_policy = 'match-viewer'
+ self.__default_custom_origin_protocol_policy = "match-viewer"
self.__default_custom_origin_read_timeout = 30
self.__default_custom_origin_keepalive_timeout = 5
- self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
+ self.__default_datetime_string = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
self.__default_cache_behavior_min_ttl = 0
self.__default_cache_behavior_max_ttl = 31536000
self.__default_cache_behavior_default_ttl = 86400
self.__default_cache_behavior_compress = False
- self.__default_cache_behavior_viewer_protocol_policy = 'allow-all'
+ self.__default_cache_behavior_viewer_protocol_policy = "allow-all"
self.__default_cache_behavior_smooth_streaming = False
- self.__default_cache_behavior_forwarded_values_forward_cookies = 'none'
+ self.__default_cache_behavior_forwarded_values_forward_cookies = "none"
self.__default_cache_behavior_forwarded_values_query_string = True
self.__default_trusted_signers_enabled = False
- self.__valid_price_classes = set([
- 'PriceClass_100',
- 'PriceClass_200',
- 'PriceClass_All'
- ])
- self.__valid_origin_protocol_policies = set([
- 'http-only',
- 'match-viewer',
- 'https-only'
- ])
- self.__valid_origin_ssl_protocols = set([
- 'SSLv3',
- 'TLSv1',
- 'TLSv1.1',
- 'TLSv1.2'
- ])
- self.__valid_cookie_forwarding = set([
- 'none',
- 'whitelist',
- 'all'
- ])
- self.__valid_viewer_protocol_policies = set([
- 'allow-all',
- 'https-only',
- 'redirect-to-https'
- ])
- self.__valid_methods = set([
- 'GET',
- 'HEAD',
- 'POST',
- 'PUT',
- 'PATCH',
- 'OPTIONS',
- 'DELETE'
- ])
+ self.__valid_price_classes = set(
+ [
+ "PriceClass_100",
+ "PriceClass_200",
+ "PriceClass_All",
+ ]
+ )
+ self.__valid_origin_protocol_policies = set(
+ [
+ "http-only",
+ "match-viewer",
+ "https-only",
+ ]
+ )
+ self.__valid_origin_ssl_protocols = set(
+ [
+ "SSLv3",
+ "TLSv1",
+ "TLSv1.1",
+ "TLSv1.2",
+ ]
+ )
+ self.__valid_cookie_forwarding = set(
+ [
+ "none",
+ "whitelist",
+ "all",
+ ]
+ )
+ self.__valid_viewer_protocol_policies = set(
+ [
+ "allow-all",
+ "https-only",
+ "redirect-to-https",
+ ]
+ )
+ self.__valid_methods = set(
+ [
+ "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ "PATCH",
+ "OPTIONS",
+ "DELETE",
+ ]
+ )
self.__valid_methods_cached_methods = [
- set([
- 'GET',
- 'HEAD'
- ]),
- set([
- 'GET',
- 'HEAD',
- 'OPTIONS'
- ])
+ set(
+ [
+ "GET",
+ "HEAD",
+ ]
+ ),
+ set(
+ [
+ "GET",
+ "HEAD",
+ "OPTIONS",
+ ]
+ ),
]
self.__valid_methods_allowed_methods = [
self.__valid_methods_cached_methods[0],
self.__valid_methods_cached_methods[1],
- self.__valid_methods
+ self.__valid_methods,
]
- self.__valid_lambda_function_association_event_types = set([
- 'viewer-request',
- 'viewer-response',
- 'origin-request',
- 'origin-response'
- ])
- self.__valid_viewer_certificate_ssl_support_methods = set([
- 'sni-only',
- 'vip'
- ])
- self.__valid_viewer_certificate_minimum_protocol_versions = set([
- 'SSLv3',
- 'TLSv1',
- 'TLSv1_2016',
- 'TLSv1.1_2016',
- 'TLSv1.2_2018',
- 'TLSv1.2_2019',
- 'TLSv1.2_2021'
- ])
- self.__valid_viewer_certificate_certificate_sources = set([
- 'cloudfront',
- 'iam',
- 'acm'
- ])
- self.__valid_http_versions = set([
- 'http1.1',
- 'http2'
- ])
- self.__s3_bucket_domain_identifier = '.s3.amazonaws.com'
+ self.__valid_lambda_function_association_event_types = set(
+ [
+ "viewer-request",
+ "viewer-response",
+ "origin-request",
+ "origin-response",
+ ]
+ )
+ self.__valid_viewer_certificate_ssl_support_methods = set(
+ [
+ "sni-only",
+ "vip",
+ ]
+ )
+ self.__valid_viewer_certificate_minimum_protocol_versions = set(
+ [
+ "SSLv3",
+ "TLSv1",
+ "TLSv1_2016",
+ "TLSv1.1_2016",
+ "TLSv1.2_2018",
+ "TLSv1.2_2019",
+ "TLSv1.2_2021",
+ ]
+ )
+ self.__valid_viewer_certificate_certificate_sources = set(
+ [
+ "cloudfront",
+ "iam",
+ "acm",
+ ]
+ )
+ self.__valid_http_versions = set(
+ [
+ "http1.1",
+ "http2",
+ "http3",
+ "http2and3",
+ ]
+ )
+ self.__s3_bucket_domain_regex = re.compile(r"\.s3(?:\.[^.]+)?\.amazonaws\.com$")
def add_missing_key(self, dict_object, key_to_set, value_to_set):
if key_to_set not in dict_object and value_to_set is not None:
@@ -1615,7 +1725,9 @@ class CloudFrontValidationManager(object):
dict_object = change_dict_key_name(dict_object, old_key, new_key)
return dict_object
- def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False):
+ def add_key_else_validate(
+ self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False
+ ):
if key_name in dict_object:
self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values)
else:
@@ -1630,26 +1742,36 @@ class CloudFrontValidationManager(object):
if logging is None:
return None
valid_logging = {}
- if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging):
- self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.")
- valid_logging['include_cookies'] = logging.get('include_cookies')
- valid_logging['enabled'] = logging.get('enabled')
- valid_logging['bucket'] = logging.get('bucket')
- valid_logging['prefix'] = logging.get('prefix')
+ if logging and not set(["enabled", "include_cookies", "bucket", "prefix"]).issubset(logging):
+ self.module.fail_json(
+ msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified."
+ )
+ valid_logging["include_cookies"] = logging.get("include_cookies")
+ valid_logging["enabled"] = logging.get("enabled")
+ valid_logging["bucket"] = logging.get("bucket")
+ valid_logging["prefix"] = logging.get("prefix")
return valid_logging
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution logging")
def validate_is_list(self, list_to_validate, list_name):
if not isinstance(list_to_validate, list):
- self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__))
+ self.module.fail_json(msg=f"{list_name} is of type {type(list_to_validate).__name__}. Must be a list.")
def validate_required_key(self, key_name, full_key_name, dict_object):
if key_name not in dict_object:
- self.module.fail_json(msg="%s must be specified." % full_key_name)
-
- def validate_origins(self, client, config, origins, default_origin_domain_name,
- default_origin_path, create_distribution, purge_origins=False):
+ self.module.fail_json(msg=f"{full_key_name} must be specified.")
+
+ def validate_origins(
+ self,
+ client,
+ config,
+ origins,
+ default_origin_domain_name,
+ default_origin_path,
+ create_distribution,
+ purge_origins=False,
+ ):
try:
if origins is None:
if default_origin_domain_name is None and not create_distribution:
@@ -1658,23 +1780,24 @@ class CloudFrontValidationManager(object):
else:
return ansible_list_to_cloudfront_list(config)
if default_origin_domain_name is not None:
- origins = [{
- 'domain_name': default_origin_domain_name,
- 'origin_path': default_origin_path or ''
- }]
+ origins = [{"domain_name": default_origin_domain_name, "origin_path": default_origin_path or ""}]
else:
origins = []
- self.validate_is_list(origins, 'origins')
+ self.validate_is_list(origins, "origins")
if not origins and default_origin_domain_name is None and create_distribution:
- self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.")
+ self.module.fail_json(
+ msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one."
+ )
all_origins = OrderedDict()
new_domains = list()
for origin in config:
- all_origins[origin.get('domain_name')] = origin
+ all_origins[origin.get("domain_name")] = origin
for origin in origins:
- origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path)
- all_origins[origin['domain_name']] = origin
- new_domains.append(origin['domain_name'])
+ origin = self.validate_origin(
+ client, all_origins.get(origin.get("domain_name"), {}), origin, default_origin_path
+ )
+ all_origins[origin["domain_name"]] = origin
+ new_domains.append(origin["domain_name"])
if purge_origins:
for domain in list(all_origins.keys()):
if domain not in new_domains:
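validate_origins keys origins by domain_name and, when purge_origins is set, drops any existing domain that is not relisted. An illustrative sketch (distribution ID reused from the examples above; domains are placeholders):

- name: replace the origin list, removing any origin not listed here
  community.aws.cloudfront_distribution:
    state: present
    distribution_id: E1RP5A2MJ8073O
    purge_origins: true
    origins:
      - id: 'kept-origin'               # illustrative ID
        domain_name: kept.example.com   # any other existing origin is purged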
@@ -1684,37 +1807,55 @@ class CloudFrontValidationManager(object):
self.module.fail_json_aws(e, msg="Error validating distribution origins")
def validate_s3_origin_configuration(self, client, existing_config, origin):
- if origin.get('s3_origin_config', {}).get('origin_access_identity'):
- return origin['s3_origin_config']['origin_access_identity']
+ if origin.get("s3_origin_config", {}).get("origin_access_identity"):
+ return origin["s3_origin_config"]["origin_access_identity"]
- if existing_config.get('s3_origin_config', {}).get('origin_access_identity'):
- return existing_config['s3_origin_config']['origin_access_identity']
+ if existing_config.get("s3_origin_config", {}).get("origin_access_identity"):
+ return existing_config["s3_origin_config"]["origin_access_identity"]
try:
- comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
- caller_reference = "%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
- cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference,
- Comment=comment))
- oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
+ comment = f"access-identity-by-ansible-{origin.get('domain_name')}-{self.__default_datetime_string}"
+ caller_reference = f"{origin.get('domain_name')}-{self.__default_datetime_string}"
+ cfoai_config = dict(
+ CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, Comment=comment)
+ )
+ oai = client.create_cloud_front_origin_access_identity(**cfoai_config)["CloudFrontOriginAccessIdentity"][
+ "Id"
+ ]
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
- return "origin-access-identity/cloudfront/%s" % oai
+ self.module.fail_json_aws(e, msg=f"Couldn't create Origin Access Identity for id {origin['id']}")
+ return f"origin-access-identity/cloudfront/{oai}"
def validate_origin(self, client, existing_config, origin, default_origin_path):
try:
- origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or ''))
- self.validate_required_key('origin_path', 'origins[].origin_path', origin)
- origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string))
- if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0:
- for custom_header in origin.get('custom_headers'):
- if 'header_name' not in custom_header or 'header_value' not in custom_header:
- self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.")
- origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers'))
+ origin = self.add_missing_key(
+ origin, "origin_path", existing_config.get("origin_path", default_origin_path or "")
+ )
+ self.validate_required_key("origin_path", "origins[].origin_path", origin)
+ origin = self.add_missing_key(origin, "id", existing_config.get("id", self.__default_datetime_string))
+ if "custom_headers" in origin and len(origin.get("custom_headers")) > 0:
+ for custom_header in origin.get("custom_headers"):
+ if "header_name" not in custom_header or "header_value" not in custom_header:
+ self.module.fail_json(
+ msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified."
+ )
+ origin["custom_headers"] = ansible_list_to_cloudfront_list(origin.get("custom_headers"))
else:
- origin['custom_headers'] = ansible_list_to_cloudfront_list()
- if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower():
+ origin["custom_headers"] = ansible_list_to_cloudfront_list()
+ if "origin_shield" in origin:
+ origin_shield = origin.get("origin_shield")
+ if origin_shield.get("enabled"):
+ origin_shield_region = origin_shield.get("origin_shield_region")
+ if origin_shield_region is None:
+ self.module.fail_json(
+ msg="origins[].origin_shield.origin_shield_region must be specified"
+ " when origins[].origin_shield.enabled is true."
+ )
+ else:
+ origin_shield_region = origin_shield_region.lower()
+ if self.__s3_bucket_domain_regex.search(origin.get("domain_name").lower()):
if origin.get("s3_origin_access_identity_enabled") is not None:
- if origin['s3_origin_access_identity_enabled']:
+ if origin["s3_origin_access_identity_enabled"]:
s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin)
else:
s3_origin_config = None
@@ -1728,26 +1869,47 @@ class CloudFrontValidationManager(object):
origin["s3_origin_config"] = dict(origin_access_identity=oai)
- if 'custom_origin_config' in origin:
- self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive")
+ if "custom_origin_config" in origin:
+ self.module.fail_json(
+ msg="s3 origin domains and custom_origin_config are mutually exclusive",
+ )
else:
- origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {}))
- custom_origin_config = origin.get('custom_origin_config')
- custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy',
- 'origins[].custom_origin_config.origin_protocol_policy',
- self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies)
- custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout)
- custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout)
- custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port)
- custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port)
- if custom_origin_config.get('origin_ssl_protocols', {}).get('items'):
- custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items']
- if custom_origin_config.get('origin_ssl_protocols'):
- self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols',
- self.__valid_origin_ssl_protocols)
+ origin = self.add_missing_key(
+ origin, "custom_origin_config", existing_config.get("custom_origin_config", {})
+ )
+ custom_origin_config = origin.get("custom_origin_config")
+ custom_origin_config = self.add_key_else_validate(
+ custom_origin_config,
+ "origin_protocol_policy",
+ "origins[].custom_origin_config.origin_protocol_policy",
+ self.__default_custom_origin_protocol_policy,
+ self.__valid_origin_protocol_policies,
+ )
+ custom_origin_config = self.add_missing_key(
+ custom_origin_config, "origin_read_timeout", self.__default_custom_origin_read_timeout
+ )
+ custom_origin_config = self.add_missing_key(
+ custom_origin_config, "origin_keepalive_timeout", self.__default_custom_origin_keepalive_timeout
+ )
+ custom_origin_config = self.add_key_else_change_dict_key(
+ custom_origin_config, "http_port", "h_t_t_p_port", self.__default_http_port
+ )
+ custom_origin_config = self.add_key_else_change_dict_key(
+ custom_origin_config, "https_port", "h_t_t_p_s_port", self.__default_https_port
+ )
+ if custom_origin_config.get("origin_ssl_protocols", {}).get("items"):
+ custom_origin_config["origin_ssl_protocols"] = custom_origin_config["origin_ssl_protocols"]["items"]
+ if custom_origin_config.get("origin_ssl_protocols"):
+ self.validate_attribute_list_with_allowed_list(
+ custom_origin_config["origin_ssl_protocols"],
+ "origins[].origin_ssl_protocols",
+ self.__valid_origin_ssl_protocols,
+ )
else:
- custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols
- custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols'])
+ custom_origin_config["origin_ssl_protocols"] = self.__default_origin_ssl_protocols
+ custom_origin_config["origin_ssl_protocols"] = ansible_list_to_cloudfront_list(
+ custom_origin_config["origin_ssl_protocols"]
+ )
return origin
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error validating distribution origin")
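validate_origin rejects custom_origin_config on S3 bucket domains and otherwise fills in the defaults shown above (match-viewer protocol policy, 30s read timeout, 5s keepalive, TLSv1/TLSv1.1/TLSv1.2). A sketch of an explicit custom origin (domain and ID illustrative):

- name: custom origin with explicit protocol and SSL settings
  community.aws.cloudfront_distribution:
    state: present
    origins:
      - id: 'custom-origin'             # illustrative ID
        domain_name: app.example.com    # must not be an S3 bucket domain
        custom_origin_config:
          origin_protocol_policy: https-only
          origin_read_timeout: 30       # module default
          origin_keepalive_timeout: 5   # module default
          origin_ssl_protocols:
            - TLSv1.2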
@@ -1761,13 +1923,16 @@ class CloudFrontValidationManager(object):
# is true (if purge_cache_behaviors is not true, we can't really know the full new order)
if not purge_cache_behaviors:
for behavior in config:
- all_cache_behaviors[behavior['path_pattern']] = behavior
+ all_cache_behaviors[behavior["path_pattern"]] = behavior
for cache_behavior in cache_behaviors:
- valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}),
- cache_behavior, valid_origins)
- all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
+ valid_cache_behavior = self.validate_cache_behavior(
+ all_cache_behaviors.get(cache_behavior.get("path_pattern"), {}), cache_behavior, valid_origins
+ )
+ all_cache_behaviors[cache_behavior["path_pattern"]] = valid_cache_behavior
if purge_cache_behaviors:
- for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
+ for target_origin_id in set(all_cache_behaviors.keys()) - set(
+ [cb["path_pattern"] for cb in cache_behaviors]
+ ):
del all_cache_behaviors[target_origin_id]
return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
except Exception as e:
@@ -1778,40 +1943,79 @@ class CloudFrontValidationManager(object):
cache_behavior = {}
if cache_behavior is None and valid_origins is not None:
return config
- cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache)
- cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior)
- cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior)
- cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior)
- cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior)
- cache_behavior = self.validate_field_level_encryption_id(config, cache_behavior.get('field_level_encryption_id'), cache_behavior)
+ cache_behavior = self.validate_cache_behavior_first_level_keys(
+ config, cache_behavior, valid_origins, is_default_cache
+ )
+ if cache_behavior.get("cache_policy_id") is None:
+ cache_behavior = self.validate_forwarded_values(
+ config, cache_behavior.get("forwarded_values"), cache_behavior
+ )
+ cache_behavior = self.validate_allowed_methods(config, cache_behavior.get("allowed_methods"), cache_behavior)
+ cache_behavior = self.validate_lambda_function_associations(
+ config, cache_behavior.get("lambda_function_associations"), cache_behavior
+ )
+ cache_behavior = self.validate_trusted_signers(config, cache_behavior.get("trusted_signers"), cache_behavior)
+ cache_behavior = self.validate_field_level_encryption_id(
+ config, cache_behavior.get("field_level_encryption_id"), cache_behavior
+ )
return cache_behavior
def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
+ if cache_behavior.get("cache_policy_id") is not None and cache_behavior.get("forwarded_values") is not None:
+ if is_default_cache:
+ cache_behavior_name = "Default cache behavior"
+ else:
+ cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}"
+ self.module.fail_json(
+ msg=f"{cache_behavior_name} cannot have both a cache_policy_id and a forwarded_values option."
+ )
try:
- cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l',
- config.get('min_t_t_l', self.__default_cache_behavior_min_ttl))
- cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l',
- config.get('max_t_t_l', self.__default_cache_behavior_max_ttl))
- cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l',
- config.get('default_t_t_l', self.__default_cache_behavior_default_ttl))
- cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress))
- target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id'))
+ if cache_behavior.get("cache_policy_id") is None:
+ cache_behavior = self.add_key_else_change_dict_key(
+ cache_behavior,
+ "min_ttl",
+ "min_t_t_l",
+ config.get("min_t_t_l", self.__default_cache_behavior_min_ttl),
+ )
+ cache_behavior = self.add_key_else_change_dict_key(
+ cache_behavior,
+ "max_ttl",
+ "max_t_t_l",
+ config.get("max_t_t_l", self.__default_cache_behavior_max_ttl),
+ )
+ cache_behavior = self.add_key_else_change_dict_key(
+ cache_behavior,
+ "default_ttl",
+ "default_t_t_l",
+ config.get("default_t_t_l", self.__default_cache_behavior_default_ttl),
+ )
+ cache_behavior = self.add_missing_key(
+ cache_behavior, "compress", config.get("compress", self.__default_cache_behavior_compress)
+ )
+ target_origin_id = cache_behavior.get("target_origin_id", config.get("target_origin_id"))
if not target_origin_id:
target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins)
- if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]:
+ if target_origin_id not in [origin["id"] for origin in valid_origins.get("items", [])]:
if is_default_cache:
- cache_behavior_name = 'Default cache behavior'
+ cache_behavior_name = "Default cache behavior"
else:
- cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern']
- self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." %
- cache_behavior_name)
- cache_behavior['target_origin_id'] = target_origin_id
- cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy',
- config.get('viewer_protocol_policy',
- self.__default_cache_behavior_viewer_protocol_policy),
- self.__valid_viewer_protocol_policies)
- cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming',
- config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming))
+ cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}"
+ self.module.fail_json(
+ msg=f"{cache_behavior_name} has target_origin_id pointing to an origin that does not exist."
+ )
+ cache_behavior["target_origin_id"] = target_origin_id
+ cache_behavior = self.add_key_else_validate(
+ cache_behavior,
+ "viewer_protocol_policy",
+ "cache_behavior.viewer_protocol_policy",
+ config.get("viewer_protocol_policy", self.__default_cache_behavior_viewer_protocol_policy),
+ self.__valid_viewer_protocol_policies,
+ )
+ cache_behavior = self.add_missing_key(
+ cache_behavior,
+ "smooth_streaming",
+ config.get("smooth_streaming", self.__default_cache_behavior_smooth_streaming),
+ )
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys")
@@ -1820,30 +2024,40 @@ class CloudFrontValidationManager(object):
try:
if not forwarded_values:
forwarded_values = dict()
- existing_config = config.get('forwarded_values', {})
- headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items'))
+ existing_config = config.get("forwarded_values", {})
+ headers = forwarded_values.get("headers", existing_config.get("headers", {}).get("items"))
if headers:
headers.sort()
- forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers)
- if 'cookies' not in forwarded_values:
- forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies)
- forwarded_values['cookies'] = {'forward': forward}
+ forwarded_values["headers"] = ansible_list_to_cloudfront_list(headers)
+ if "cookies" not in forwarded_values:
+ forward = existing_config.get("cookies", {}).get(
+ "forward", self.__default_cache_behavior_forwarded_values_forward_cookies
+ )
+ forwarded_values["cookies"] = {"forward": forward}
else:
- existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items')
- whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist)
+ existing_whitelist = existing_config.get("cookies", {}).get("whitelisted_names", {}).get("items")
+ whitelist = forwarded_values.get("cookies").get("whitelisted_names", existing_whitelist)
if whitelist:
- self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names')
- forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist)
- cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward'))
- self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward',
- self.__valid_cookie_forwarding)
- forwarded_values['cookies']['forward'] = cookie_forwarding
- query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', []))
- self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys')
- forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys)
- forwarded_values = self.add_missing_key(forwarded_values, 'query_string',
- existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string))
- cache_behavior['forwarded_values'] = forwarded_values
+ self.validate_is_list(whitelist, "forwarded_values.whitelisted_names")
+ forwarded_values["cookies"]["whitelisted_names"] = ansible_list_to_cloudfront_list(whitelist)
+ cookie_forwarding = forwarded_values.get("cookies").get(
+ "forward", existing_config.get("cookies", {}).get("forward")
+ )
+ self.validate_attribute_with_allowed_values(
+ cookie_forwarding, "cache_behavior.forwarded_values.cookies.forward", self.__valid_cookie_forwarding
+ )
+ forwarded_values["cookies"]["forward"] = cookie_forwarding
+ query_string_cache_keys = forwarded_values.get(
+ "query_string_cache_keys", existing_config.get("query_string_cache_keys", {}).get("items", [])
+ )
+ self.validate_is_list(query_string_cache_keys, "forwarded_values.query_string_cache_keys")
+ forwarded_values["query_string_cache_keys"] = ansible_list_to_cloudfront_list(query_string_cache_keys)
+ forwarded_values = self.add_missing_key(
+ forwarded_values,
+ "query_string",
+ existing_config.get("query_string", self.__default_cache_behavior_forwarded_values_query_string),
+ )
+ cache_behavior["forwarded_values"] = forwarded_values
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating forwarded values")
@@ -1851,57 +2065,68 @@ class CloudFrontValidationManager(object):
def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior):
try:
if lambda_function_associations is not None:
- self.validate_is_list(lambda_function_associations, 'lambda_function_associations')
+ self.validate_is_list(lambda_function_associations, "lambda_function_associations")
for association in lambda_function_associations:
- association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n')
- self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type',
- self.__valid_lambda_function_association_event_types)
- cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations)
+ association = change_dict_key_name(association, "lambda_function_arn", "lambda_function_a_r_n")
+ self.validate_attribute_with_allowed_values(
+ association.get("event_type"),
+ "cache_behaviors[].lambda_function_associations.event_type",
+ self.__valid_lambda_function_association_event_types,
+ )
+ cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list(
+ lambda_function_associations
+ )
else:
- if 'lambda_function_associations' in config:
- cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations')
+ if "lambda_function_associations" in config:
+ cache_behavior["lambda_function_associations"] = config.get("lambda_function_associations")
else:
- cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
+ cache_behavior["lambda_function_associations"] = ansible_list_to_cloudfront_list([])
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating lambda function associations")
def validate_field_level_encryption_id(self, config, field_level_encryption_id, cache_behavior):
if field_level_encryption_id is not None:
- cache_behavior['field_level_encryption_id'] = field_level_encryption_id
- elif 'field_level_encryption_id' in config:
- cache_behavior['field_level_encryption_id'] = config.get('field_level_encryption_id')
+ cache_behavior["field_level_encryption_id"] = field_level_encryption_id
+ elif "field_level_encryption_id" in config:
+ cache_behavior["field_level_encryption_id"] = config.get("field_level_encryption_id")
else:
- cache_behavior['field_level_encryption_id'] = ""
+ cache_behavior["field_level_encryption_id"] = ""
return cache_behavior
def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
try:
if allowed_methods is not None:
- self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
- temp_allowed_items = allowed_methods.get('items')
- self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
- self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
- self.__valid_methods_allowed_methods)
- cached_items = allowed_methods.get('cached_methods')
- if 'cached_methods' in allowed_methods:
- self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
- self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_items.cached_methods[]',
- self.__valid_methods_cached_methods)
+ self.validate_required_key("items", "cache_behavior.allowed_methods.items[]", allowed_methods)
+ temp_allowed_items = allowed_methods.get("items")
+ self.validate_is_list(temp_allowed_items, "cache_behavior.allowed_methods.items")
+ self.validate_attribute_list_with_allowed_list(
+ temp_allowed_items, "cache_behavior.allowed_methods.items[]", self.__valid_methods_allowed_methods
+ )
+ cached_items = allowed_methods.get("cached_methods")
+ if "cached_methods" in allowed_methods:
+ self.validate_is_list(cached_items, "cache_behavior.allowed_methods.cached_methods")
+ self.validate_attribute_list_with_allowed_list(
+ cached_items,
+ "cache_behavior.allowed_items.cached_methods[]",
+ self.__valid_methods_cached_methods,
+ )
            # we don't care if the order in which cloudfront stores the methods differs - preserving the
            # existing order reduces the likelihood of making unnecessary changes
- if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
- cache_behavior['allowed_methods'] = config['allowed_methods']
+ if "allowed_methods" in config and set(config["allowed_methods"]["items"]) == set(temp_allowed_items):
+ cache_behavior["allowed_methods"] = config["allowed_methods"]
else:
- cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
+ cache_behavior["allowed_methods"] = ansible_list_to_cloudfront_list(temp_allowed_items)
- if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
- cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
+ if cached_items and set(cached_items) == set(
+ config.get("allowed_methods", {}).get("cached_methods", {}).get("items", [])
+ ):
+ cache_behavior["allowed_methods"]["cached_methods"] = config["allowed_methods"]["cached_methods"]
else:
- cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
+ cache_behavior["allowed_methods"]["cached_methods"] = ansible_list_to_cloudfront_list(cached_items)
else:
- if 'allowed_methods' in config:
- cache_behavior['allowed_methods'] = config.get('allowed_methods')
+ if "allowed_methods" in config:
+ cache_behavior["allowed_methods"] = config.get("allowed_methods")
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating allowed methods")
@@ -1910,14 +2135,16 @@ class CloudFrontValidationManager(object):
try:
if trusted_signers is None:
trusted_signers = {}
- if 'items' in trusted_signers:
- valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
+ if "items" in trusted_signers:
+ valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get("items"))
else:
- valid_trusted_signers = dict(quantity=config.get('quantity', 0))
- if 'items' in config:
- valid_trusted_signers = dict(items=config['items'])
- valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled))
- cache_behavior['trusted_signers'] = valid_trusted_signers
+ valid_trusted_signers = dict(quantity=config.get("quantity", 0))
+ if "items" in config:
+ valid_trusted_signers = dict(items=config["items"])
+ valid_trusted_signers["enabled"] = trusted_signers.get(
+ "enabled", config.get("enabled", self.__default_trusted_signers_enabled)
+ )
+ cache_behavior["trusted_signers"] = valid_trusted_signers
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating trusted signers")
@@ -1926,19 +2153,37 @@ class CloudFrontValidationManager(object):
try:
if viewer_certificate is None:
return None
- if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
- self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" +
- "_certificate set to true.")
- self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
- self.__valid_viewer_certificate_ssl_support_methods)
- self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
- self.__valid_viewer_certificate_minimum_protocol_versions)
- self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
- self.__valid_viewer_certificate_certificate_sources)
- viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
- viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
- viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
- viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
+ if (
+ viewer_certificate.get("cloudfront_default_certificate")
+ and viewer_certificate.get("ssl_support_method") is not None
+ ):
+            self.module.fail_json(
+                msg="viewer_certificate.ssl_support_method should not be specified with"
+                " viewer_certificate.cloudfront_default_certificate set to true."
+            )
+ self.validate_attribute_with_allowed_values(
+ viewer_certificate.get("ssl_support_method"),
+ "viewer_certificate.ssl_support_method",
+ self.__valid_viewer_certificate_ssl_support_methods,
+ )
+ self.validate_attribute_with_allowed_values(
+ viewer_certificate.get("minimum_protocol_version"),
+ "viewer_certificate.minimum_protocol_version",
+ self.__valid_viewer_certificate_minimum_protocol_versions,
+ )
+ self.validate_attribute_with_allowed_values(
+ viewer_certificate.get("certificate_source"),
+ "viewer_certificate.certificate_source",
+ self.__valid_viewer_certificate_certificate_sources,
+ )
+ viewer_certificate = change_dict_key_name(
+ viewer_certificate, "cloudfront_default_certificate", "cloud_front_default_certificate"
+ )
+ viewer_certificate = change_dict_key_name(viewer_certificate, "ssl_support_method", "s_s_l_support_method")
+ viewer_certificate = change_dict_key_name(viewer_certificate, "iam_certificate_id", "i_a_m_certificate_id")
+ viewer_certificate = change_dict_key_name(
+ viewer_certificate, "acm_certificate_arn", "a_c_m_certificate_arn"
+ )
return viewer_certificate
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating viewer certificate")
@@ -1947,16 +2192,18 @@ class CloudFrontValidationManager(object):
try:
if custom_error_responses is None and not purge_custom_error_responses:
return ansible_list_to_cloudfront_list(config)
- self.validate_is_list(custom_error_responses, 'custom_error_responses')
+ self.validate_is_list(custom_error_responses, "custom_error_responses")
result = list()
- existing_responses = dict((response['error_code'], response) for response in custom_error_responses)
+ existing_responses = dict((response["error_code"], response) for response in custom_error_responses)
for custom_error_response in custom_error_responses:
- self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
- custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
- if 'response_code' in custom_error_response:
- custom_error_response['response_code'] = str(custom_error_response['response_code'])
- if custom_error_response['error_code'] in existing_responses:
- del existing_responses[custom_error_response['error_code']]
+ self.validate_required_key("error_code", "custom_error_responses[].error_code", custom_error_response)
+ custom_error_response = change_dict_key_name(
+ custom_error_response, "error_caching_min_ttl", "error_caching_min_t_t_l"
+ )
+ if "response_code" in custom_error_response:
+ custom_error_response["response_code"] = str(custom_error_response["response_code"])
+ if custom_error_response["error_code"] in existing_responses:
+ del existing_responses[custom_error_response["error_code"]]
result.append(custom_error_response)
if not purge_custom_error_responses:
result.extend(existing_responses.values())
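
The loop above gives requested responses precedence by error code; with purging disabled, stored responses for other codes are re-appended afterwards. A sketch of those intended semantics with hypothetical names:

def merge_error_responses(requested, existing, purge):
    remaining = {r["error_code"]: r for r in existing}
    result = []
    for resp in requested:
        remaining.pop(resp["error_code"], None)  # requested entry wins
        result.append(resp)
    if not purge:
        result.extend(remaining.values())  # keep untouched existing codes
    return result

existing = [{"error_code": 404, "response_page_path": "/404.html"}]
print(merge_error_responses([{"error_code": 500}], existing, purge=False))
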
@@ -1972,54 +2219,72 @@ class CloudFrontValidationManager(object):
return None
else:
return config
- self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
- geo_restriction = restrictions.get('geo_restriction')
- self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
- existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', [])
- geo_restriction_items = geo_restriction.get('items')
+ self.validate_required_key("geo_restriction", "restrictions.geo_restriction", restrictions)
+ geo_restriction = restrictions.get("geo_restriction")
+ self.validate_required_key(
+ "restriction_type", "restrictions.geo_restriction.restriction_type", geo_restriction
+ )
+ existing_restrictions = (
+ config.get("geo_restriction", {}).get(geo_restriction["restriction_type"], {}).get("items", [])
+ )
+ geo_restriction_items = geo_restriction.get("items")
if not purge_restrictions:
- geo_restriction_items.extend([rest for rest in existing_restrictions if
- rest not in geo_restriction_items])
+ geo_restriction_items.extend(
+ [rest for rest in existing_restrictions if rest not in geo_restriction_items]
+ )
valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
- valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
- return {'geo_restriction': valid_restrictions}
+ valid_restrictions["restriction_type"] = geo_restriction.get("restriction_type")
+ return {"geo_restriction": valid_restrictions}
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating restrictions")
- def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
+ def validate_distribution_config_parameters(
+ self, config, default_root_object, ipv6_enabled, http_version, web_acl_id
+ ):
try:
- config['default_root_object'] = default_root_object or config.get('default_root_object', '')
- config['is_i_p_v6_enabled'] = ipv6_enabled if ipv6_enabled is not None else config.get('is_i_p_v6_enabled', self.__default_ipv6_enabled)
- if http_version is not None or config.get('http_version'):
- self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
- config['http_version'] = http_version or config.get('http_version')
- if web_acl_id or config.get('web_a_c_l_id'):
- config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
+ config["default_root_object"] = default_root_object or config.get("default_root_object", "")
+ config["is_i_p_v6_enabled"] = (
+ ipv6_enabled
+ if ipv6_enabled is not None
+ else config.get("is_i_p_v6_enabled", self.__default_ipv6_enabled)
+ )
+ if http_version is not None or config.get("http_version"):
+ self.validate_attribute_with_allowed_values(http_version, "http_version", self.__valid_http_versions)
+ config["http_version"] = http_version or config.get("http_version")
+ if web_acl_id or config.get("web_a_c_l_id"):
+ config["web_a_c_l_id"] = web_acl_id or config.get("web_a_c_l_id")
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution config parameters")
- def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
+ def validate_common_distribution_parameters(
+ self, config, enabled, aliases, logging, price_class, purge_aliases=False
+ ):
try:
if config is None:
config = {}
if aliases is not None:
if not purge_aliases:
- aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
- if alias not in aliases])
- config['aliases'] = ansible_list_to_cloudfront_list(aliases)
+ aliases.extend(
+ [alias for alias in config.get("aliases", {}).get("items", []) if alias not in aliases]
+ )
+ config["aliases"] = ansible_list_to_cloudfront_list(aliases)
if logging is not None:
- config['logging'] = self.validate_logging(logging)
- config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
+ config["logging"] = self.validate_logging(logging)
+ config["enabled"] = (
+ enabled if enabled is not None else config.get("enabled", self.__default_distribution_enabled)
+ )
if price_class is not None:
- self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
- config['price_class'] = price_class
+ self.validate_attribute_with_allowed_values(price_class, "price_class", self.__valid_price_classes)
+ config["price_class"] = price_class
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating common distribution parameters")
def validate_comment(self, config, comment):
- config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
+ config["comment"] = comment or config.get(
+ "comment", "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string
+ )
return config
def validate_caller_reference(self, caller_reference):
@@ -2028,37 +2293,52 @@ class CloudFrontValidationManager(object):
def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
try:
if valid_origins is not None:
- valid_origins_list = valid_origins.get('items')
- if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
- return str(valid_origins_list[0].get('id'))
- self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
+ valid_origins_list = valid_origins.get("items")
+ if (
+ valid_origins_list is not None
+ and isinstance(valid_origins_list, list)
+ and len(valid_origins_list) > 0
+ ):
+ return str(valid_origins_list[0].get("id"))
+ self.module.fail_json(
+ msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration."
+ )
except Exception as e:
self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")
def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
try:
self.validate_is_list(attribute_list, attribute_list_name)
- if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
- isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
- self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
+ if (
+ isinstance(allowed_list, list)
+ and set(attribute_list) not in allowed_list
+ or isinstance(allowed_list, set)
+ and not set(allowed_list).issuperset(attribute_list)
+ ):
+ attribute_list = " ".join(str(a) for a in allowed_list)
+ self.module.fail_json(msg=f"The attribute list {attribute_list_name} must be one of [{attribute_list}]")
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")
def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
if attribute is not None and attribute not in allowed_list:
- self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
+ attribute_list = " ".join(str(a) for a in allowed_list)
+ self.module.fail_json(msg=f"The attribute {attribute_name} must be one of [{attribute_list}]")
def validate_distribution_from_caller_reference(self, caller_reference):
try:
- distributions = self.__cloudfront_facts_mgr.list_distributions(False)
- distribution_name = 'Distribution'
- distribution_config_name = 'DistributionConfig'
- distribution_ids = [dist.get('Id') for dist in distributions]
+ distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False)
+ distribution_name = "Distribution"
+ distribution_config_name = "DistributionConfig"
+ distribution_ids = [dist.get("Id") for dist in distributions]
for distribution_id in distribution_ids:
- distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
+ distribution = self.__cloudfront_facts_mgr.get_distribution(id=distribution_id)
if distribution is not None:
distribution_config = distribution[distribution_name].get(distribution_config_name)
- if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference:
+ if (
+ distribution_config is not None
+ and distribution_config.get("CallerReference") == caller_reference
+ ):
distribution[distribution_name][distribution_config_name] = distribution_config
return distribution
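
The lookup above has to scan: CloudFront offers no server-side filter on CallerReference, so the module lists every distribution id and fetches each one until the reference matches. A compact sketch of the pattern with injected fetch callables (illustrative; the module delegates to CloudFrontFactsServiceManager):

def find_by_caller_reference(list_ids, get_distribution, caller_reference):
    for dist_id in list_ids():
        dist = get_distribution(id=dist_id)
        config = dist.get("Distribution", {}).get("DistributionConfig", {})
        if config.get("CallerReference") == caller_reference:
            return dist
    return None

dists = {"E1": {"Distribution": {"DistributionConfig": {"CallerReference": "ref-1"}}}}
found = find_by_caller_reference(lambda: dists, lambda id: dists[id], "ref-1")
print(found is not None)  # True
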
@@ -2073,68 +2353,73 @@ class CloudFrontValidationManager(object):
if aliases and distribution_id is None:
distribution_id = self.validate_distribution_id_from_alias(aliases)
if distribution_id:
- return self.__cloudfront_facts_mgr.get_distribution(distribution_id)
+ return self.__cloudfront_facts_mgr.get_distribution(id=distribution_id)
return None
except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference")
+ self.module.fail_json_aws(
+ e, msg="Error validating distribution_id from alias, aliases and caller reference"
+ )
def validate_distribution_id_from_alias(self, aliases):
- distributions = self.__cloudfront_facts_mgr.list_distributions(False)
+ distributions = self.__cloudfront_facts_mgr.list_distributions(keyed=False)
if distributions:
for distribution in distributions:
- distribution_aliases = distribution.get('Aliases', {}).get('Items', [])
+ distribution_aliases = distribution.get("Aliases", {}).get("Items", [])
if set(aliases) & set(distribution_aliases):
- return distribution['Id']
+ return distribution["Id"]
return None
def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):
if distribution_id is None:
- distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Id']
+ distribution = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)
+ distribution_id = distribution["Distribution"]["Id"]
try:
- waiter = client.get_waiter('distribution_deployed')
+ waiter = client.get_waiter("distribution_deployed")
attempts = 1 + int(wait_timeout / 60)
- waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts})
+ waiter.wait(Id=distribution_id, WaiterConfig={"MaxAttempts": attempts})
except botocore.exceptions.WaiterError as e:
- self.module.fail_json_aws(e, msg="Timeout waiting for CloudFront action."
- " Waited for {0} seconds before timeout.".format(to_text(wait_timeout)))
+ self.module.fail_json_aws(
+ e,
+ msg=f"Timeout waiting for CloudFront action. Waited for {to_text(wait_timeout)} seconds before timeout.",
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id))
+ self.module.fail_json_aws(e, msg=f"Error getting distribution {distribution_id}")
def main():
argument_spec = dict(
- state=dict(choices=['present', 'absent'], default='present'),
+ state=dict(choices=["present", "absent"], default="present"),
caller_reference=dict(),
comment=dict(),
distribution_id=dict(),
e_tag=dict(),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
alias=dict(),
- aliases=dict(type='list', default=[], elements='str'),
- purge_aliases=dict(type='bool', default=False),
+ aliases=dict(type="list", default=[], elements="str"),
+ purge_aliases=dict(type="bool", default=False),
default_root_object=dict(),
- origins=dict(type='list', elements='dict'),
- purge_origins=dict(type='bool', default=False),
- default_cache_behavior=dict(type='dict'),
- cache_behaviors=dict(type='list', elements='dict'),
- purge_cache_behaviors=dict(type='bool', default=False),
- custom_error_responses=dict(type='list', elements='dict'),
- purge_custom_error_responses=dict(type='bool', default=False),
- logging=dict(type='dict'),
+ origins=dict(type="list", elements="dict"),
+ purge_origins=dict(type="bool", default=False),
+ default_cache_behavior=dict(type="dict"),
+ cache_behaviors=dict(type="list", elements="dict"),
+ purge_cache_behaviors=dict(type="bool", default=False),
+ custom_error_responses=dict(type="list", elements="dict"),
+ purge_custom_error_responses=dict(type="bool", default=False),
+ logging=dict(type="dict"),
price_class=dict(),
- enabled=dict(type='bool'),
- viewer_certificate=dict(type='dict'),
- restrictions=dict(type='dict'),
+ enabled=dict(type="bool"),
+ viewer_certificate=dict(type="dict"),
+ restrictions=dict(type="dict"),
web_acl_id=dict(),
http_version=dict(),
- ipv6_enabled=dict(type='bool'),
+ ipv6_enabled=dict(type="bool"),
default_origin_domain_name=dict(),
default_origin_path=dict(),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=1800, type='int')
+ wait=dict(default=False, type="bool"),
+ wait_timeout=dict(default=1800, type="int"),
)
result = {}
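
The mutually_exclusive groups passed to the module below fail the run early if two parameters in a group are both set. A plain-Python sketch of what that check amounts to (illustrative, not Ansible's implementation):

def check_mutually_exclusive(params, groups):
    for group in groups:
        present = [name for name in group if params.get(name) is not None]
        if len(present) > 1:
            raise ValueError(f"parameters are mutually exclusive: {present}")

check_mutually_exclusive(
    {"distribution_id": "E123", "alias": None},
    [["distribution_id", "alias"]],
)  # passes; supplying both would raise
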
@@ -2144,129 +2429,154 @@ def main():
argument_spec=argument_spec,
supports_check_mode=False,
mutually_exclusive=[
- ['distribution_id', 'alias'],
- ['default_origin_domain_name', 'distribution_id'],
- ['default_origin_domain_name', 'alias'],
- ]
+ ["distribution_id", "alias"],
+ ["default_origin_domain_name", "distribution_id"],
+ ["default_origin_domain_name", "alias"],
+ ],
)
- client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("cloudfront", retry_decorator=AWSRetry.jittered_backoff())
validation_mgr = CloudFrontValidationManager(module)
- state = module.params.get('state')
- caller_reference = module.params.get('caller_reference')
- comment = module.params.get('comment')
- e_tag = module.params.get('e_tag')
- tags = module.params.get('tags')
- purge_tags = module.params.get('purge_tags')
- distribution_id = module.params.get('distribution_id')
- alias = module.params.get('alias')
- aliases = module.params.get('aliases')
- purge_aliases = module.params.get('purge_aliases')
- default_root_object = module.params.get('default_root_object')
- origins = module.params.get('origins')
- purge_origins = module.params.get('purge_origins')
- default_cache_behavior = module.params.get('default_cache_behavior')
- cache_behaviors = module.params.get('cache_behaviors')
- purge_cache_behaviors = module.params.get('purge_cache_behaviors')
- custom_error_responses = module.params.get('custom_error_responses')
- purge_custom_error_responses = module.params.get('purge_custom_error_responses')
- logging = module.params.get('logging')
- price_class = module.params.get('price_class')
- enabled = module.params.get('enabled')
- viewer_certificate = module.params.get('viewer_certificate')
- restrictions = module.params.get('restrictions')
- purge_restrictions = module.params.get('purge_restrictions')
- web_acl_id = module.params.get('web_acl_id')
- http_version = module.params.get('http_version')
- ipv6_enabled = module.params.get('ipv6_enabled')
- default_origin_domain_name = module.params.get('default_origin_domain_name')
- default_origin_path = module.params.get('default_origin_path')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
+ state = module.params.get("state")
+ caller_reference = module.params.get("caller_reference")
+ comment = module.params.get("comment")
+ e_tag = module.params.get("e_tag")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ distribution_id = module.params.get("distribution_id")
+ alias = module.params.get("alias")
+ aliases = module.params.get("aliases")
+ purge_aliases = module.params.get("purge_aliases")
+ default_root_object = module.params.get("default_root_object")
+ origins = module.params.get("origins")
+ purge_origins = module.params.get("purge_origins")
+ default_cache_behavior = module.params.get("default_cache_behavior")
+ cache_behaviors = module.params.get("cache_behaviors")
+ purge_cache_behaviors = module.params.get("purge_cache_behaviors")
+ custom_error_responses = module.params.get("custom_error_responses")
+ purge_custom_error_responses = module.params.get("purge_custom_error_responses")
+ logging = module.params.get("logging")
+ price_class = module.params.get("price_class")
+ enabled = module.params.get("enabled")
+ viewer_certificate = module.params.get("viewer_certificate")
+ restrictions = module.params.get("restrictions")
+ purge_restrictions = module.params.get("purge_restrictions")
+ web_acl_id = module.params.get("web_acl_id")
+ http_version = module.params.get("http_version")
+ ipv6_enabled = module.params.get("ipv6_enabled")
+ default_origin_domain_name = module.params.get("default_origin_domain_name")
+ default_origin_path = module.params.get("default_origin_path")
+ wait = module.params.get("wait")
+ wait_timeout = module.params.get("wait_timeout")
if alias and alias not in aliases:
aliases.append(alias)
- distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
+ distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(
+ distribution_id, aliases, caller_reference
+ )
- update = state == 'present' and distribution
- create = state == 'present' and not distribution
- delete = state == 'absent' and distribution
+ update = state == "present" and distribution
+ create = state == "present" and not distribution
+ delete = state == "absent" and distribution
if not (update or create or delete):
module.exit_json(changed=False)
+ config = {}
if update or delete:
- config = distribution['Distribution']['DistributionConfig']
- e_tag = distribution['ETag']
- distribution_id = distribution['Distribution']['Id']
- else:
- config = dict()
+ config = distribution["Distribution"]["DistributionConfig"]
+ e_tag = distribution["ETag"]
+ distribution_id = distribution["Distribution"]["Id"]
+
if update:
config = camel_dict_to_snake_dict(config, reversible=True)
if create or update:
- config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
- config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
- config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
- default_origin_path, create, purge_origins)
- config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
- cache_behaviors, config['origins'], purge_cache_behaviors)
- config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
- default_cache_behavior, config['origins'], True)
- config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
- custom_error_responses, purge_custom_error_responses)
- valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
+ config = validation_mgr.validate_common_distribution_parameters(
+ config, enabled, aliases, logging, price_class, purge_aliases
+ )
+ config = validation_mgr.validate_distribution_config_parameters(
+ config, default_root_object, ipv6_enabled, http_version, web_acl_id
+ )
+ config["origins"] = validation_mgr.validate_origins(
+ client,
+ config.get("origins", {}).get("items", []),
+ origins,
+ default_origin_domain_name,
+ default_origin_path,
+ create,
+ purge_origins,
+ )
+ config["cache_behaviors"] = validation_mgr.validate_cache_behaviors(
+ config.get("cache_behaviors", {}).get("items", []),
+ cache_behaviors,
+ config["origins"],
+ purge_cache_behaviors,
+ )
+ config["default_cache_behavior"] = validation_mgr.validate_cache_behavior(
+ config.get("default_cache_behavior", {}), default_cache_behavior, config["origins"], True
+ )
+ config["custom_error_responses"] = validation_mgr.validate_custom_error_responses(
+ config.get("custom_error_responses", {}).get("items", []),
+ custom_error_responses,
+ purge_custom_error_responses,
+ )
+ valid_restrictions = validation_mgr.validate_restrictions(
+ config.get("restrictions", {}), restrictions, purge_restrictions
+ )
if valid_restrictions:
- config['restrictions'] = valid_restrictions
+ config["restrictions"] = valid_restrictions
valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
- config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
+ config = merge_validation_into_config(config, valid_viewer_certificate, "viewer_certificate")
config = validation_mgr.validate_comment(config, comment)
config = snake_dict_to_camel_dict(config, capitalize_first=True)
if create:
- config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
+ config["CallerReference"] = validation_mgr.validate_caller_reference(caller_reference)
result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags or {}))
result = camel_dict_to_snake_dict(result)
- result['tags'] = list_tags_for_resource(client, module, result['arn'])
+ result["tags"] = list_tags_for_resource(client, module, result["arn"])
if delete:
- if config['Enabled']:
- config['Enabled'] = False
+ if config["Enabled"]:
+ config["Enabled"] = False
result = update_distribution(client, module, config, distribution_id, e_tag)
- validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
- distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
+ validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference"))
+ distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(
+ distribution_id, aliases, caller_reference
+ )
# e_tag = distribution['ETag']
result = delete_distribution(client, module, distribution)
if update:
- changed = config != distribution['Distribution']['DistributionConfig']
+ changed = config != distribution["Distribution"]["DistributionConfig"]
if changed:
result = update_distribution(client, module, config, distribution_id, e_tag)
else:
- result = distribution['Distribution']
- existing_tags = list_tags_for_resource(client, module, result['ARN'])
- distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
- changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
+ result = distribution["Distribution"]
+ existing_tags = list_tags_for_resource(client, module, result["ARN"])
+ distribution["Distribution"]["DistributionConfig"]["tags"] = existing_tags
+ changed |= update_tags(client, module, existing_tags, tags, purge_tags, result["ARN"])
result = camel_dict_to_snake_dict(result)
- result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
- result['diff'] = dict()
- diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
+ result["distribution_config"]["tags"] = config["tags"] = list_tags_for_resource(client, module, result["arn"])
+ result["diff"] = dict()
+ diff = recursive_diff(distribution["Distribution"]["DistributionConfig"], config)
if diff:
- result['diff']['before'] = diff[0]
- result['diff']['after'] = diff[1]
+ result["diff"]["before"] = diff[0]
+ result["diff"]["after"] = diff[1]
if wait and (create or update):
- validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
+ validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get("CallerReference"))
- if 'distribution_config' in result:
- result.update(result['distribution_config'])
- del result['distribution_config']
+ if "distribution_config" in result:
+ result.update(result["distribution_config"])
+ del result["distribution_config"]
module.exit_json(changed=changed, **result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
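
main() above reduces the requested state plus the lookup result to one of three actions; anything else exits unchanged. A minimal sketch of that resolution:

def resolve_action(state, exists):
    if state == "present":
        return "update" if exists else "create"
    if state == "absent" and exists:
        return "delete"  # the module disables the distribution first
    return None  # nothing to do

for state, exists in (("present", True), ("present", False), ("absent", False)):
    print(state, exists, "->", resolve_action(state, exists))
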
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py
index cb97664fa..3bd20868a 100644
--- a/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_distribution_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: cloudfront_distribution_info
version_added: 1.0.0
@@ -143,12 +141,12 @@ options:
type: bool
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Get a summary of distributions
@@ -191,9 +189,9 @@ EXAMPLES = '''
- name: Get all information about lists not requiring parameters (i.e. list_origin_access_identities, list_distributions, list_streaming_distributions)
community.aws.cloudfront_distribution_info:
all_lists: true
-'''
+"""
-RETURN = '''
+RETURN = r"""
origin_access_identity:
description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set.
returned: only if I(origin_access_identity) is true
@@ -242,405 +240,169 @@ result:
as figuring out the DistributionId is usually the reason one uses this module in the first place.
returned: always
type: dict
-'''
-
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-
-
-class CloudFrontServiceManager:
- """Handles CloudFront Services"""
-
- def __init__(self, module):
- self.module = module
-
- try:
- self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff())
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
-
- def get_distribution(self, distribution_id):
- try:
- distribution = self.client.get_distribution(aws_retry=True, Id=distribution_id)
- return distribution
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error describing distribution")
-
- def get_distribution_config(self, distribution_id):
- try:
- distribution = self.client.get_distribution_config(aws_retry=True, Id=distribution_id)
- return distribution
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error describing distribution configuration")
-
- def get_origin_access_identity(self, origin_access_identity_id):
- try:
- origin_access_identity = self.client.get_cloud_front_origin_access_identity(aws_retry=True, Id=origin_access_identity_id)
- return origin_access_identity
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error describing origin access identity")
-
- def get_origin_access_identity_config(self, origin_access_identity_id):
- try:
- origin_access_identity = self.client.get_cloud_front_origin_access_identity_config(aws_retry=True, Id=origin_access_identity_id)
- return origin_access_identity
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error describing origin access identity configuration")
-
- def get_invalidation(self, distribution_id, invalidation_id):
- try:
- invalidation = self.client.get_invalidation(aws_retry=True, DistributionId=distribution_id, Id=invalidation_id)
- return invalidation
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error describing invalidation")
-
- def get_streaming_distribution(self, distribution_id):
- try:
- streaming_distribution = self.client.get_streaming_distribution(aws_retry=True, Id=distribution_id)
- return streaming_distribution
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error describing streaming distribution")
-
- def get_streaming_distribution_config(self, distribution_id):
- try:
- streaming_distribution = self.client.get_streaming_distribution_config(aws_retry=True, Id=distribution_id)
- return streaming_distribution
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error describing streaming distribution")
-
- # Split out paginator to allow for the backoff decorator to function
- @AWSRetry.jittered_backoff()
- def _paginated_result(self, paginator_name, **params):
- paginator = self.client.get_paginator(paginator_name)
- results = paginator.paginate(**params).build_full_result()
- return results
-
- def list_origin_access_identities(self):
- try:
- results = self._paginated_result('list_cloud_front_origin_access_identities')
- origin_access_identity_list = results.get('CloudFrontOriginAccessIdentityList', {'Items': []})
-
- if len(origin_access_identity_list['Items']) > 0:
- return origin_access_identity_list['Items']
- return {}
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities")
-
- def list_distributions(self, keyed=True):
- try:
- results = self._paginated_result('list_distributions')
- distribution_list = results.get('DistributionList', {'Items': []})
-
- if len(distribution_list['Items']) > 0:
- distribution_list = distribution_list['Items']
- else:
- return {}
-
- if not keyed:
- return distribution_list
- return self.keyed_list_helper(distribution_list)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error listing distributions")
-
- def list_distributions_by_web_acl_id(self, web_acl_id):
- try:
- results = self._paginated_result('list_cloud_front_origin_access_identities', WebAclId=web_acl_id)
- distribution_list = results.get('DistributionList', {'Items': []})
-
- if len(distribution_list['Items']) > 0:
- distribution_list = distribution_list['Items']
- else:
- return {}
- return self.keyed_list_helper(distribution_list)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error listing distributions by web acl id")
-
- def list_invalidations(self, distribution_id):
- try:
- results = self._paginated_result('list_invalidations', DistributionId=distribution_id)
- invalidation_list = results.get('InvalidationList', {'Items': []})
-
- if len(invalidation_list['Items']) > 0:
- return invalidation_list['Items']
- return {}
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error listing invalidations")
-
- def list_streaming_distributions(self, keyed=True):
- try:
- results = self._paginated_result('list_streaming_distributions')
- streaming_distribution_list = results.get('StreamingDistributionList', {'Items': []})
-
- if len(streaming_distribution_list['Items']) > 0:
- streaming_distribution_list = streaming_distribution_list['Items']
- else:
- return {}
-
- if not keyed:
- return streaming_distribution_list
- return self.keyed_list_helper(streaming_distribution_list)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error listing streaming distributions")
-
- def summary(self):
- summary_dict = {}
- summary_dict.update(self.summary_get_distribution_list(False))
- summary_dict.update(self.summary_get_distribution_list(True))
- summary_dict.update(self.summary_get_origin_access_identity_list())
- return summary_dict
-
- def summary_get_origin_access_identity_list(self):
- try:
- origin_access_identity_list = {'origin_access_identities': []}
- origin_access_identities = self.list_origin_access_identities()
- for origin_access_identity in origin_access_identities:
- oai_id = origin_access_identity['Id']
- oai_full_response = self.get_origin_access_identity(oai_id)
- oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
- origin_access_identity_list['origin_access_identities'].append(oai_summary)
- return origin_access_identity_list
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error generating summary of origin access identities")
-
- def summary_get_distribution_list(self, streaming=False):
- try:
- list_name = 'streaming_distributions' if streaming else 'distributions'
- key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
- distribution_list = {list_name: []}
- distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
- for dist in distributions:
- temp_distribution = {}
- for key_name in key_list:
- temp_distribution[key_name] = dist[key_name]
- temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])]
- temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
- if not streaming:
- temp_distribution['WebACLId'] = dist['WebACLId']
- invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
- if invalidation_ids:
- temp_distribution['Invalidations'] = invalidation_ids
- resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'])
- temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
- distribution_list[list_name].append(temp_distribution)
- return distribution_list
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error generating summary of distributions")
- except Exception as e:
- self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
- exception=traceback.format_exc())
-
- def get_etag_from_distribution_id(self, distribution_id, streaming):
- distribution = {}
- if not streaming:
- distribution = self.get_distribution(distribution_id)
- else:
- distribution = self.get_streaming_distribution(distribution_id)
- return distribution['ETag']
-
- def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
- try:
- invalidation_ids = []
- invalidations = self.list_invalidations(distribution_id)
- for invalidation in invalidations:
- invalidation_ids.append(invalidation['Id'])
- return invalidation_ids
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting list of invalidation ids")
-
- def get_distribution_id_from_domain_name(self, domain_name):
- try:
- distribution_id = ""
- distributions = self.list_distributions(False)
- distributions += self.list_streaming_distributions(False)
- for dist in distributions:
- if 'Items' in dist['Aliases']:
- for alias in dist['Aliases']['Items']:
- if str(alias).lower() == domain_name.lower():
- distribution_id = dist['Id']
- break
- return distribution_id
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting distribution id from domain name")
-
- def get_aliases_from_distribution_id(self, distribution_id):
- aliases = []
- try:
- distributions = self.list_distributions(False)
- for dist in distributions:
- if dist['Id'] == distribution_id and 'Items' in dist['Aliases']:
- for alias in dist['Aliases']['Items']:
- aliases.append(alias)
- break
- return aliases
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id")
-
- def keyed_list_helper(self, list_to_key):
- keyed_list = dict()
- for item in list_to_key:
- distribution_id = item['Id']
- if 'Items' in item['Aliases']:
- aliases = item['Aliases']['Items']
- for alias in aliases:
- keyed_list.update({alias: item})
- keyed_list.update({distribution_id: item})
- return keyed_list
+"""
+
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
- facts[distribution_id].update(details)
+ facts[distribution_id] = details
# also have a fixed key for accessing results/details returned
- facts['result'] = details
- facts['result']['DistributionId'] = distribution_id
+ facts["result"] = details
+ facts["result"]["DistributionId"] = distribution_id
for alias in aliases:
- facts[alias].update(details)
+ facts[alias] = details
return facts
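
The assignment above (rather than dict.update) means a fresh lookup fully replaces any earlier entry, and the same details land under the distribution id, each alias, and a fixed "result" key. A sketch of that keying; it copies the details for "result" so the input stays unmutated, whereas the module mutates in place:

def key_facts(details, distribution_id, aliases):
    facts = {distribution_id: details}
    facts["result"] = dict(details)
    facts["result"]["DistributionId"] = distribution_id
    for alias in aliases:
        facts[alias] = details
    return facts

facts = key_facts({"Status": "Deployed"}, "E123", ["www.example.com"])
print(facts["result"]["DistributionId"])  # E123
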
def main():
argument_spec = dict(
- distribution_id=dict(required=False, type='str'),
- invalidation_id=dict(required=False, type='str'),
- origin_access_identity_id=dict(required=False, type='str'),
- domain_name_alias=dict(required=False, type='str'),
- all_lists=dict(required=False, default=False, type='bool'),
- distribution=dict(required=False, default=False, type='bool'),
- distribution_config=dict(required=False, default=False, type='bool'),
- origin_access_identity=dict(required=False, default=False, type='bool'),
- origin_access_identity_config=dict(required=False, default=False, type='bool'),
- invalidation=dict(required=False, default=False, type='bool'),
- streaming_distribution=dict(required=False, default=False, type='bool'),
- streaming_distribution_config=dict(required=False, default=False, type='bool'),
- list_origin_access_identities=dict(required=False, default=False, type='bool'),
- list_distributions=dict(required=False, default=False, type='bool'),
- list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
- list_invalidations=dict(required=False, default=False, type='bool'),
- list_streaming_distributions=dict(required=False, default=False, type='bool'),
- summary=dict(required=False, default=False, type='bool'),
+ distribution_id=dict(required=False, type="str"),
+ invalidation_id=dict(required=False, type="str"),
+ origin_access_identity_id=dict(required=False, type="str"),
+ domain_name_alias=dict(required=False, type="str"),
+ all_lists=dict(required=False, default=False, type="bool"),
+ distribution=dict(required=False, default=False, type="bool"),
+ distribution_config=dict(required=False, default=False, type="bool"),
+ origin_access_identity=dict(required=False, default=False, type="bool"),
+ origin_access_identity_config=dict(required=False, default=False, type="bool"),
+ invalidation=dict(required=False, default=False, type="bool"),
+ streaming_distribution=dict(required=False, default=False, type="bool"),
+ streaming_distribution_config=dict(required=False, default=False, type="bool"),
+ list_origin_access_identities=dict(required=False, default=False, type="bool"),
+ list_distributions=dict(required=False, default=False, type="bool"),
+ list_distributions_by_web_acl_id=dict(required=False, default=False, type="bool"),
+ list_invalidations=dict(required=False, default=False, type="bool"),
+ list_streaming_distributions=dict(required=False, default=False, type="bool"),
+ summary=dict(required=False, default=False, type="bool"),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- service_mgr = CloudFrontServiceManager(module)
-
- distribution_id = module.params.get('distribution_id')
- invalidation_id = module.params.get('invalidation_id')
- origin_access_identity_id = module.params.get('origin_access_identity_id')
- web_acl_id = module.params.get('web_acl_id')
- domain_name_alias = module.params.get('domain_name_alias')
- all_lists = module.params.get('all_lists')
- distribution = module.params.get('distribution')
- distribution_config = module.params.get('distribution_config')
- origin_access_identity = module.params.get('origin_access_identity')
- origin_access_identity_config = module.params.get('origin_access_identity_config')
- invalidation = module.params.get('invalidation')
- streaming_distribution = module.params.get('streaming_distribution')
- streaming_distribution_config = module.params.get('streaming_distribution_config')
- list_origin_access_identities = module.params.get('list_origin_access_identities')
- list_distributions = module.params.get('list_distributions')
- list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
- list_invalidations = module.params.get('list_invalidations')
- list_streaming_distributions = module.params.get('list_streaming_distributions')
- summary = module.params.get('summary')
+ service_mgr = CloudFrontFactsServiceManager(module)
+
+ distribution_id = module.params.get("distribution_id")
+ invalidation_id = module.params.get("invalidation_id")
+ origin_access_identity_id = module.params.get("origin_access_identity_id")
+ web_acl_id = module.params.get("web_acl_id")
+ domain_name_alias = module.params.get("domain_name_alias")
+ all_lists = module.params.get("all_lists")
+ distribution = module.params.get("distribution")
+ distribution_config = module.params.get("distribution_config")
+ origin_access_identity = module.params.get("origin_access_identity")
+ origin_access_identity_config = module.params.get("origin_access_identity_config")
+ invalidation = module.params.get("invalidation")
+ streaming_distribution = module.params.get("streaming_distribution")
+ streaming_distribution_config = module.params.get("streaming_distribution_config")
+ list_origin_access_identities = module.params.get("list_origin_access_identities")
+ list_distributions = module.params.get("list_distributions")
+ list_distributions_by_web_acl_id = module.params.get("list_distributions_by_web_acl_id")
+ list_invalidations = module.params.get("list_invalidations")
+ list_streaming_distributions = module.params.get("list_streaming_distributions")
+ summary = module.params.get("summary")
aliases = []
- result = {'cloudfront': {}}
+ result = {"cloudfront": {}}
facts = {}
- require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
- streaming_distribution_config or list_invalidations)
+ require_distribution_id = (
+ distribution
+ or distribution_config
+ or invalidation
+ or streaming_distribution
+ or streaming_distribution_config
+ or list_invalidations
+ )
# set default to summary if no option specified
- summary = summary or not (distribution or distribution_config or origin_access_identity or
- origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
- list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
- list_streaming_distributions or list_distributions)
+ summary = summary or not (
+ distribution
+ or distribution_config
+ or origin_access_identity
+ or origin_access_identity_config
+ or invalidation
+ or streaming_distribution
+ or streaming_distribution_config
+ or list_origin_access_identities
+ or list_distributions_by_web_acl_id
+ or list_invalidations
+ or list_streaming_distributions
+ or list_distributions
+ )
# validations
if require_distribution_id and distribution_id is None and domain_name_alias is None:
- module.fail_json(msg='Error distribution_id or domain_name_alias have not been specified.')
- if (invalidation and invalidation_id is None):
- module.fail_json(msg='Error invalidation_id has not been specified.')
+ module.fail_json(msg="Error distribution_id or domain_name_alias have not been specified.")
+ if invalidation and invalidation_id is None:
+ module.fail_json(msg="Error invalidation_id has not been specified.")
if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
- module.fail_json(msg='Error origin_access_identity_id has not been specified.')
+ module.fail_json(msg="Error origin_access_identity_id has not been specified.")
if list_distributions_by_web_acl_id and web_acl_id is None:
- module.fail_json(msg='Error web_acl_id has not been specified.')
+ module.fail_json(msg="Error web_acl_id has not been specified.")
# get distribution id from domain name alias
if require_distribution_id and distribution_id is None:
distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
if not distribution_id:
- module.fail_json(msg='Error unable to source a distribution id from domain_name_alias')
+ module.fail_json(msg="Error unable to source a distribution id from domain_name_alias")
# set appropriate cloudfront id
- if distribution_id and not list_invalidations:
- facts = {distribution_id: {}}
- aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
- for alias in aliases:
- facts.update({alias: {}})
- if invalidation_id:
- facts.update({invalidation_id: {}})
- elif distribution_id and list_invalidations:
- facts = {distribution_id: {}}
- aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
- for alias in aliases:
- facts.update({alias: {}})
- elif origin_access_identity_id:
- facts = {origin_access_identity_id: {}}
- elif web_acl_id:
- facts = {web_acl_id: {}}
+ if invalidation_id is not None and invalidation:
+ facts.update({invalidation_id: {}})
+ if origin_access_identity_id and (origin_access_identity or origin_access_identity_config):
+ facts.update({origin_access_identity_id: {}})
+ if web_acl_id:
+ facts.update({web_acl_id: {}})
# get details based on options
if distribution:
- facts_to_set = service_mgr.get_distribution(distribution_id)
+ facts_to_set = service_mgr.get_distribution(id=distribution_id)
if distribution_config:
- facts_to_set = service_mgr.get_distribution_config(distribution_id)
+ facts_to_set = service_mgr.get_distribution_config(id=distribution_id)
if origin_access_identity:
- facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id))
+ facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(id=origin_access_identity_id))
if origin_access_identity_config:
- facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id))
+ facts[origin_access_identity_id].update(
+ service_mgr.get_origin_access_identity_config(id=origin_access_identity_id)
+ )
if invalidation:
- facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id)
+ facts_to_set = service_mgr.get_invalidation(distribution_id=distribution_id, id=invalidation_id)
facts[invalidation_id].update(facts_to_set)
if streaming_distribution:
- facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
+ facts_to_set = service_mgr.get_streaming_distribution(id=distribution_id)
if streaming_distribution_config:
- facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
+ facts_to_set = service_mgr.get_streaming_distribution_config(id=distribution_id)
if list_invalidations:
- facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
- if 'facts_to_set' in vars():
+ invalidations = service_mgr.list_invalidations(distribution_id=distribution_id) or {}
+ facts_to_set = {"invalidations": invalidations}
+ if "facts_to_set" in vars():
+ aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)
# get list based on options
if all_lists or list_origin_access_identities:
- facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
+ facts["origin_access_identities"] = service_mgr.list_origin_access_identities() or {}
if all_lists or list_distributions:
- facts['distributions'] = service_mgr.list_distributions()
+ facts["distributions"] = service_mgr.list_distributions() or {}
if all_lists or list_streaming_distributions:
- facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
+ facts["streaming_distributions"] = service_mgr.list_streaming_distributions() or {}
if list_distributions_by_web_acl_id:
- facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
+ facts["distributions_by_web_acl_id"] = service_mgr.list_distributions_by_web_acl_id(web_acl_id=web_acl_id) or {}
if list_invalidations:
- facts['invalidations'] = service_mgr.list_invalidations(distribution_id)
+ facts["invalidations"] = service_mgr.list_invalidations(distribution_id=distribution_id) or {}
# default summary option
if summary:
- facts['summary'] = service_mgr.summary()
+ facts["summary"] = service_mgr.summary()
- result['changed'] = False
- result['cloudfront'].update(facts)
+ result["changed"] = False
+ result["cloudfront"].update(facts)
module.exit_json(msg="Retrieved CloudFront info.", **result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
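
A minimal sketch of the fact layout produced by set_facts_for_distribution_id_and_alias() above (the alias and the details payload are hypothetical placeholders; the distribution id is the one from the module examples). Because the same dict object is now stored under every key, the DistributionId injected into facts["result"] is also visible under the distribution id and each alias:

details = {"Distribution": {"Status": "Deployed"}}  # illustrative payload
facts = set_facts_for_distribution_id_and_alias(details, {}, "E15BU8SDCGSG57", ["cdn.example.com"])
# all three keys reference the same dict, so the injected id shows up everywhere
assert facts["E15BU8SDCGSG57"] is facts["result"] is facts["cdn.example.com"]
assert facts["E15BU8SDCGSG57"]["DistributionId"] == "E15BU8SDCGSG57"
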
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py b/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py
index 767a1d181..732d135e1 100644
--- a/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_invalidation.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
version_added: 1.0.0
@@ -14,15 +12,10 @@ module: cloudfront_invalidation
short_description: create invalidations for AWS CloudFront distributions
description:
- - Allows for invalidation of a batch of paths for a CloudFront distribution.
-
-author: Willem van Ketwich (@wilvk)
-
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - Allows for invalidation of a batch of paths for a CloudFront distribution.
+author:
+ - Willem van Ketwich (@wilvk)
options:
distribution_id:
@@ -52,10 +45,13 @@ options:
notes:
- does not support check mode
-'''
-
-EXAMPLES = r'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+EXAMPLES = r"""
- name: create a batch of invalidations using a distribution_id as a reference
community.aws.cloudfront_invalidation:
distribution_id: E15BU8SDCGSG57
@@ -73,10 +69,9 @@ EXAMPLES = r'''
- /testpathone/test4.css
- /testpathtwo/test5.js
- /testpaththree/*
+"""
-'''
-
-RETURN = r'''
+RETURN = r"""
invalidation:
description: The invalidation's information.
returned: always
@@ -130,7 +125,7 @@ location:
returned: always
type: str
sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622
-'''
+"""
import datetime
@@ -142,60 +137,61 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
class CloudFrontInvalidationServiceManager(object):
"""
Handles CloudFront service calls to AWS for invalidations
"""
- def __init__(self, module):
+ def __init__(self, module, cloudfront_facts_mgr):
self.module = module
- self.client = module.client('cloudfront')
+ self.client = module.client("cloudfront")
+ self.__cloudfront_facts_mgr = cloudfront_facts_mgr
def create_invalidation(self, distribution_id, invalidation_batch):
- current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference'])
+ current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch["CallerReference"])
try:
- response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch)
- response.pop('ResponseMetadata', None)
+ response = self.client.create_invalidation(
+ DistributionId=distribution_id, InvalidationBatch=invalidation_batch
+ )
+ response.pop("ResponseMetadata", None)
if current_invalidation_response:
return response, False
else:
return response, True
- except is_boto3_error_message('Your request contains a caller reference that was used for a previous invalidation '
- 'batch for the same distribution.'):
- self.module.warn("InvalidationBatch target paths are not modifiable. "
- "To make a new invalidation please update caller_reference.")
+ except is_boto3_error_message(
+ "Your request contains a caller reference that was used for a previous invalidation "
+ "batch for the same distribution."
+ ):
+ self.module.warn(
+ "InvalidationBatch target paths are not modifiable. "
+ "To make a new invalidation please update caller_reference."
+ )
return current_invalidation_response, False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
def get_invalidation(self, distribution_id, caller_reference):
- current_invalidation = {}
# find all invalidations for the distribution
- try:
- paginator = self.client.get_paginator('list_invalidations')
- invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', [])
- invalidation_ids = [inv['Id'] for inv in invalidations]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.")
+ invalidations = self.__cloudfront_facts_mgr.list_invalidations(distribution_id=distribution_id)
# check if there is an invalidation with the same caller reference
- for inv_id in invalidation_ids:
- try:
- invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation']
- caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id))
- if caller_ref == caller_reference:
- current_invalidation = invalidation
- break
-
- current_invalidation.pop('ResponseMetadata', None)
- return current_invalidation
+ for invalidation in invalidations:
+ invalidation_info = self.__cloudfront_facts_mgr.get_invalidation(
+ distribution_id=distribution_id, id=invalidation["Id"]
+ )
+ if invalidation_info.get("InvalidationBatch", {}).get("CallerReference") == caller_reference:
+ invalidation_info.pop("ResponseMetadata", None)
+ return invalidation_info
+ return {}
class CloudFrontInvalidationValidationManager(object):
@@ -203,9 +199,9 @@ class CloudFrontInvalidationValidationManager(object):
Manages CloudFront validations for invalidation batches
"""
- def __init__(self, module):
+ def __init__(self, module, cloudfront_facts_mgr):
self.module = module
- self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
+ self.__cloudfront_facts_mgr = cloudfront_facts_mgr
def validate_distribution_id(self, distribution_id, alias):
try:
@@ -230,8 +226,8 @@ class CloudFrontInvalidationValidationManager(object):
else:
valid_caller_reference = datetime.datetime.now().isoformat()
valid_invalidation_batch = {
- 'paths': self.create_aws_list(invalidation_batch),
- 'caller_reference': valid_caller_reference
+ "paths": self.create_aws_list(invalidation_batch),
+ "caller_reference": valid_caller_reference,
}
return valid_invalidation_batch
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
@@ -243,18 +239,21 @@ def main():
caller_reference=dict(),
distribution_id=dict(),
alias=dict(),
- target_paths=dict(required=True, type='list', elements='str')
+ target_paths=dict(required=True, type="list", elements="str"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']])
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[["distribution_id", "alias"]]
+ )
- validation_mgr = CloudFrontInvalidationValidationManager(module)
- service_mgr = CloudFrontInvalidationServiceManager(module)
+ cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
+ validation_mgr = CloudFrontInvalidationValidationManager(module, cloudfront_facts_mgr)
+ service_mgr = CloudFrontInvalidationServiceManager(module, cloudfront_facts_mgr)
- caller_reference = module.params.get('caller_reference')
- distribution_id = module.params.get('distribution_id')
- alias = module.params.get('alias')
- target_paths = module.params.get('target_paths')
+ caller_reference = module.params.get("caller_reference")
+ distribution_id = module.params.get("distribution_id")
+ alias = module.params.get("alias")
+ target_paths = module.params.get("target_paths")
result = {}
@@ -266,5 +265,5 @@ def main():
module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
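
The invalidation module now shares a single CloudFrontFactsServiceManager between its validation and service managers instead of each building its own client and paginator. A minimal sketch of the wiring, assuming a constructed AnsibleAWSModule in module and reusing the distribution id from the examples:

cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
validation_mgr = CloudFrontInvalidationValidationManager(module, cloudfront_facts_mgr)
service_mgr = CloudFrontInvalidationServiceManager(module, cloudfront_facts_mgr)
# get_invalidation() delegates listing and fetching to the shared facts manager
# and returns {} when no batch matches the caller reference
existing = service_mgr.get_invalidation("E15BU8SDCGSG57", "this is an example reference")
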
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py b/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py
index c6879d0c5..bb5e3a017 100644
--- a/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_origin_access_identity.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
version_added: 1.0.0
@@ -16,16 +14,11 @@ short_description: Create, update and delete origin access identities for a
CloudFront distribution
description:
- - Allows for easy creation, updating and deletion of origin access
- identities.
-
-author: Willem van Ketwich (@wilvk)
-
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - Allows for easy creation, updating and deletion of origin access
+ identities.
+author:
+ - Willem van Ketwich (@wilvk)
options:
state:
@@ -54,9 +47,13 @@ options:
notes:
- Does not support check mode.
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: create an origin access identity
community.aws.cloudfront_origin_access_identity:
@@ -66,19 +63,18 @@ EXAMPLES = '''
- name: update an existing origin access identity using caller_reference as an identifier
community.aws.cloudfront_origin_access_identity:
- origin_access_identity_id: E17DRN9XUOAHZX
- caller_reference: this is an example reference
- comment: this is a new comment
+ origin_access_identity_id: E17DRN9XUOAHZX
+ caller_reference: this is an example reference
+ comment: this is a new comment
- name: delete an existing origin access identity using caller_reference as an identifier
community.aws.cloudfront_origin_access_identity:
- state: absent
- caller_reference: this is an example reference
- comment: this is a new comment
-
-'''
+ state: absent
+ caller_reference: this is an example reference
+ comment: this is a new comment
+"""
-RETURN = '''
+RETURN = r"""
cloud_front_origin_access_identity:
description: The origin access identity's information.
returned: always
@@ -113,20 +109,22 @@ location:
description: The fully qualified URI of the new origin access identity just created.
returned: when initially created
type: str
-
-'''
+"""
import datetime
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by imported AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class CloudFrontOriginAccessIdentityServiceManager(object):
@@ -136,35 +134,31 @@ class CloudFrontOriginAccessIdentityServiceManager(object):
def __init__(self, module):
self.module = module
- self.client = module.client('cloudfront')
+ self.client = module.client("cloudfront")
def create_origin_access_identity(self, caller_reference, comment):
try:
return self.client.create_cloud_front_origin_access_identity(
- CloudFrontOriginAccessIdentityConfig={
- 'CallerReference': caller_reference,
- 'Comment': comment
- }
+ CloudFrontOriginAccessIdentityConfig={"CallerReference": caller_reference, "Comment": comment}
)
except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.")
def delete_origin_access_identity(self, origin_access_identity_id, e_tag):
try:
- return self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag)
+ result = self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag)
+ return result, True
except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.")
+ self.module.fail_json_aws(e, msg="Error deleting Origin Access Identity.")
def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag):
changed = False
- new_config = {
- 'CallerReference': caller_reference,
- 'Comment': comment
- }
+ new_config = {"CallerReference": caller_reference, "Comment": comment}
try:
- current_config = self.client.get_cloud_front_origin_access_identity_config(
- Id=origin_access_identity_id)['CloudFrontOriginAccessIdentityConfig']
+ current_config = self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id)[
+ "CloudFrontOriginAccessIdentityConfig"
+ ]
except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.")
@@ -194,38 +188,54 @@ class CloudFrontOriginAccessIdentityValidationManager(object):
self.module = module
self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
- def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id):
+ def describe_origin_access_identity(self, origin_access_identity_id, fail_if_missing=True):
try:
- if origin_access_identity_id is None:
- return
- oai = self.__cloudfront_facts_mgr.get_origin_access_identity(origin_access_identity_id)
- if oai is not None:
- return oai.get('ETag')
- except (ClientError, BotoCoreError) as e:
+ return self.__cloudfront_facts_mgr.get_origin_access_identity(
+ id=origin_access_identity_id, fail_if_error=False
+ )
+ except is_boto3_error_code("NoSuchCloudFrontOriginAccessIdentity") as e: # pylint: disable=duplicate-except
+ if fail_if_missing:
+ self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.")
+ return {}
+ except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except
self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.")
- def validate_origin_access_identity_id_from_caller_reference(
- self, caller_reference):
- try:
- origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities()
- origin_origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities]
- for origin_access_identity_id in origin_origin_access_identity_ids:
- oai_config = (self.__cloudfront_facts_mgr.get_origin_access_identity_config(origin_access_identity_id))
- temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference')
- if temp_caller_reference == caller_reference:
- return origin_access_identity_id
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting Origin Access Identity from caller_reference.")
+ def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id, fail_if_missing):
+ oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing)
+ if oai is not None:
+ return oai.get("ETag")
+
+ def validate_origin_access_identity_id_from_caller_reference(self, caller_reference):
+ origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities()
+ origin_origin_access_identity_ids = [oai.get("Id") for oai in origin_access_identities]
+ for origin_access_identity_id in origin_origin_access_identity_ids:
+ oai_config = self.__cloudfront_facts_mgr.get_origin_access_identity_config(id=origin_access_identity_id)
+ temp_caller_reference = oai_config.get("CloudFrontOriginAccessIdentityConfig").get("CallerReference")
+ if temp_caller_reference == caller_reference:
+ return origin_access_identity_id
def validate_comment(self, comment):
if comment is None:
- return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
+ return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime(
+ "%Y-%m-%dT%H:%M:%S.%f"
+ )
return comment
+ def validate_caller_reference_from_origin_access_identity_id(self, origin_access_identity_id, caller_reference):
+ if caller_reference is None:
+ if origin_access_identity_id is None:
+ return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
+ oai = self.describe_origin_access_identity(origin_access_identity_id, fail_if_missing=True)
+ origin_access_config = oai.get("CloudFrontOriginAccessIdentity", {}).get(
+ "CloudFrontOriginAccessIdentityConfig", {}
+ )
+ return origin_access_config.get("CallerReference")
+ return caller_reference
+
def main():
argument_spec = dict(
- state=dict(choices=['present', 'absent'], default='present'),
+ state=dict(choices=["present", "absent"], default="present"),
origin_access_identity_id=dict(),
caller_reference=dict(),
comment=dict(),
@@ -239,32 +249,41 @@ def main():
service_mgr = CloudFrontOriginAccessIdentityServiceManager(module)
validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module)
- state = module.params.get('state')
- caller_reference = module.params.get('caller_reference')
+ state = module.params.get("state")
+ caller_reference = module.params.get("caller_reference")
- comment = module.params.get('comment')
- origin_access_identity_id = module.params.get('origin_access_identity_id')
+ comment = module.params.get("comment")
+ origin_access_identity_id = module.params.get("origin_access_identity_id")
if origin_access_identity_id is None and caller_reference is not None:
- origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference)
-
- e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id)
- comment = validation_mgr.validate_comment(comment)
-
- if state == 'present':
- if origin_access_identity_id is not None and e_tag is not None:
- result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag)
+ origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(
+ caller_reference
+ )
+
+ if state == "present":
+ comment = validation_mgr.validate_comment(comment)
+ caller_reference = validation_mgr.validate_caller_reference_from_origin_access_identity_id(
+ origin_access_identity_id, caller_reference
+ )
+ if origin_access_identity_id is not None:
+ e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, True)
+ # update cloudfront origin access identity
+ result, changed = service_mgr.update_origin_access_identity(
+ caller_reference, comment, origin_access_identity_id, e_tag
+ )
else:
+ # create cloudfront origin access identity
result = service_mgr.create_origin_access_identity(caller_reference, comment)
changed = True
- elif state == 'absent' and origin_access_identity_id is not None and e_tag is not None:
- result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag)
- changed = True
+ else:
+ e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id, False)
+ if e_tag:
+ result, changed = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag)
- result.pop('ResponseMetadata', None)
+ result.pop("ResponseMetadata", None)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
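
With the reworked absent branch above, deletion is only attempted when an ETag can still be fetched with fail_if_missing=False, which makes state=absent idempotent. A sketch of that flow, reusing the identity id from the examples:

e_tag = validation_mgr.validate_etag_from_origin_access_identity_id("E17DRN9XUOAHZX", False)
if e_tag:
    # the identity still exists: delete it and report a change
    result, changed = service_mgr.delete_origin_access_identity("E17DRN9XUOAHZX", e_tag)
# when e_tag is empty the identity is already gone and nothing is deleted
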
diff --git a/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py b/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py
index 01b38a3bd..a7558e8a8 100644
--- a/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py
+++ b/ansible_collections/community/aws/plugins/modules/cloudfront_response_headers_policy.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
version_added: 3.2.0
module: cloudfront_response_headers_policy
@@ -14,16 +12,11 @@ module: cloudfront_response_headers_policy
short_description: Create, update and delete response headers policies to be used in a Cloudfront distribution
description:
- - Create, update and delete response headers policies to be used in a Cloudfront distribution for inserting custom headers
- - See docs at U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_response_headers_policy)
-
-author: Stefan Horning (@stefanhorning)
-
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - Create, update and delete response headers policies to be used in a Cloudfront distribution for inserting custom headers
+ - See docs at U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_response_headers_policy)
+author:
+ - Stefan Horning (@stefanhorning)
options:
state:
@@ -57,9 +50,13 @@ options:
default: {}
type: dict
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Creating a Cloudfront header policy using all predefined header features and a custom header for demonstration
community.aws.cloudfront_response_headers_policy:
name: my-header-policy
@@ -113,9 +110,9 @@ EXAMPLES = '''
community.aws.cloudfront_response_headers_policy:
name: my-header-policy
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
response_headers_policy:
description: The policy's information
returned: success
@@ -141,40 +138,43 @@ response_headers_policy:
type: str
returned: always
sample: my-header-policy
-'''
+"""
+
+import datetime
try:
- from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by imported AnsibleAWSModule
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-import datetime
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-class CloudfrontResponseHeadersPolicyService(object):
+class CloudfrontResponseHeadersPolicyService(object):
def __init__(self, module):
self.module = module
- self.client = module.client('cloudfront')
+ self.client = module.client("cloudfront")
self.check_mode = module.check_mode
def find_response_headers_policy(self, name):
try:
- policies = self.client.list_response_headers_policies()['ResponseHeadersPolicyList']['Items']
+ policies = self.client.list_response_headers_policies()["ResponseHeadersPolicyList"]["Items"]
for policy in policies:
- if policy['ResponseHeadersPolicy']['ResponseHeadersPolicyConfig']['Name'] == name:
- policy_id = policy['ResponseHeadersPolicy']['Id']
+ if policy["ResponseHeadersPolicy"]["ResponseHeadersPolicyConfig"]["Name"] == name:
+ policy_id = policy["ResponseHeadersPolicy"]["Id"]
# as the list_ request does not contain the Etag (which we need), we need to do another get_ request here
- matching_policy = self.client.get_response_headers_policy(Id=policy['ResponseHeadersPolicy']['Id'])
+ matching_policy = self.client.get_response_headers_policy(Id=policy["ResponseHeadersPolicy"]["Id"])
break
else:
matching_policy = None
return matching_policy
- except (ParamValidationError, ClientError, BotoCoreError) as e:
+ except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error fetching policy information")
def create_response_header_policy(self, name, comment, cors_config, security_headers_config, custom_headers_config):
@@ -182,17 +182,17 @@ class CloudfrontResponseHeadersPolicyService(object):
security_headers_config = snake_dict_to_camel_dict(security_headers_config, capitalize_first=True)
# Little helper for turning xss_protection into XSSProtection and not into XssProtection
- if 'XssProtection' in security_headers_config:
- security_headers_config['XSSProtection'] = security_headers_config.pop('XssProtection')
+ if "XssProtection" in security_headers_config:
+ security_headers_config["XSSProtection"] = security_headers_config.pop("XssProtection")
custom_headers_config = snake_dict_to_camel_dict(custom_headers_config, capitalize_first=True)
config = {
- 'Name': name,
- 'Comment': comment,
- 'CorsConfig': self.insert_quantities(cors_config),
- 'SecurityHeadersConfig': security_headers_config,
- 'CustomHeadersConfig': self.insert_quantities(custom_headers_config)
+ "Name": name,
+ "Comment": comment,
+ "CorsConfig": self.insert_quantities(cors_config),
+ "SecurityHeadersConfig": security_headers_config,
+ "CustomHeadersConfig": self.insert_quantities(custom_headers_config),
}
config = {k: v for k, v in config.items() if v}
@@ -208,22 +208,24 @@ class CloudfrontResponseHeadersPolicyService(object):
try:
result = self.client.create_response_headers_policy(ResponseHeadersPolicyConfig=config)
changed = True
- except (ParamValidationError, ClientError, BotoCoreError) as e:
+ except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error creating policy")
else:
- policy_id = matching_policy['ResponseHeadersPolicy']['Id']
- etag = matching_policy['ETag']
+ policy_id = matching_policy["ResponseHeadersPolicy"]["Id"]
+ etag = matching_policy["ETag"]
try:
- result = self.client.update_response_headers_policy(Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config)
+ result = self.client.update_response_headers_policy(
+ Id=policy_id, IfMatch=etag, ResponseHeadersPolicyConfig=config
+ )
- changed_time = result['ResponseHeadersPolicy']['LastModifiedTime']
+ changed_time = result["ResponseHeadersPolicy"]["LastModifiedTime"]
seconds = 3 # threshold for returned timestamp age
- seconds_ago = (datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds))
+ seconds_ago = datetime.datetime.now(changed_time.tzinfo) - datetime.timedelta(0, seconds)
# consider change made by this execution of the module if returned timestamp was very recent
if changed_time > seconds_ago:
changed = True
- except (ParamValidationError, ClientError, BotoCoreError) as e:
+ except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Updating creating policy")
self.module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
@@ -234,14 +236,14 @@ class CloudfrontResponseHeadersPolicyService(object):
if matching_policy is None:
self.module.exit_json(msg="Didn't find a matching policy by that name, not deleting")
else:
- policy_id = matching_policy['ResponseHeadersPolicy']['Id']
- etag = matching_policy['ETag']
+ policy_id = matching_policy["ResponseHeadersPolicy"]["Id"]
+ etag = matching_policy["ETag"]
if self.check_mode:
result = {}
else:
try:
result = self.client.delete_response_headers_policy(Id=policy_id, IfMatch=etag)
- except (ParamValidationError, ClientError, BotoCoreError) as e:
+ except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error deleting policy")
self.module.exit_json(changed=True, **camel_dict_to_snake_dict(result))
@@ -250,43 +252,45 @@ class CloudfrontResponseHeadersPolicyService(object):
@staticmethod
def insert_quantities(dict_with_items):
# Items on top level case
- if 'Items' in dict_with_items and isinstance(dict_with_items['Items'], list):
- dict_with_items['Quantity'] = len(dict_with_items['Items'])
+ if "Items" in dict_with_items and isinstance(dict_with_items["Items"], list):
+ dict_with_items["Quantity"] = len(dict_with_items["Items"])
# Items on second level case
for k, v in dict_with_items.items():
- if isinstance(v, dict) and 'Items' in v:
- v['Quantity'] = len(v['Items'])
+ if isinstance(v, dict) and "Items" in v:
+ v["Quantity"] = len(v["Items"])
return dict_with_items
def main():
argument_spec = dict(
- name=dict(required=True, type='str'),
- comment=dict(type='str'),
- cors_config=dict(type='dict', default=dict()),
- security_headers_config=dict(type='dict', default=dict()),
- custom_headers_config=dict(type='dict', default=dict()),
- state=dict(choices=['present', 'absent'], type='str', default='present'),
+ name=dict(required=True, type="str"),
+ comment=dict(type="str"),
+ cors_config=dict(type="dict", default=dict()),
+ security_headers_config=dict(type="dict", default=dict()),
+ custom_headers_config=dict(type="dict", default=dict()),
+ state=dict(choices=["present", "absent"], type="str", default="present"),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- name = module.params.get('name')
- comment = module.params.get('comment', '')
- cors_config = module.params.get('cors_config')
- security_headers_config = module.params.get('security_headers_config')
- custom_headers_config = module.params.get('custom_headers_config')
- state = module.params.get('state')
+ name = module.params.get("name")
+ comment = module.params.get("comment", "")
+ cors_config = module.params.get("cors_config")
+ security_headers_config = module.params.get("security_headers_config")
+ custom_headers_config = module.params.get("custom_headers_config")
+ state = module.params.get("state")
service = CloudfrontResponseHeadersPolicyService(module)
- if state == 'absent':
+ if state == "absent":
service.delete_response_header_policy(name)
else:
- service.create_response_header_policy(name, comment, cors_config, security_headers_config, custom_headers_config)
+ service.create_response_header_policy(
+ name, comment, cors_config, security_headers_config, custom_headers_config
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
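
insert_quantities() above fills in the Quantity counters that the CloudFront API expects alongside each Items list, both at the top level and one level down. A small illustration with a hypothetical config (the field names are only examples of nested Items containers):

config = {
    "Items": [{"Header": "X-Test"}],
    "AccessControlAllowMethods": {"Items": ["GET", "HEAD"]},
}
config = CloudfrontResponseHeadersPolicyService.insert_quantities(config)
# Quantity fields now match the Items lengths:
# config["Quantity"] == 1
# config["AccessControlAllowMethods"]["Quantity"] == 2
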
diff --git a/ansible_collections/community/aws/plugins/modules/codebuild_project.py b/ansible_collections/community/aws/plugins/modules/codebuild_project.py
index 873b74010..1f4630f73 100644
--- a/ansible_collections/community/aws/plugins/modules/codebuild_project.py
+++ b/ansible_collections/community/aws/plugins/modules/codebuild_project.py
@@ -1,19 +1,17 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: codebuild_project
version_added: 1.0.0
short_description: Create or delete an AWS CodeBuild project
notes:
- For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html).
+ - I(tags) changed from boto3 format to standard dict format in release 6.0.0.
description:
- Create or delete a CodeBuild projects on AWS, used for building code artifacts from source code.
- Prior to release 5.0.0 this module was called C(community.aws.aws_codebuild).
@@ -137,23 +135,6 @@ options:
description:
- The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
type: str
- tags:
- description:
- - A set of tags for the build project.
- - Mutually exclusive with the I(resource_tags) parameter.
- - In release 6.0.0 this parameter will accept a simple dictionary
- instead of the list of dictionaries format. To use the simple
- dictionary format prior to release 6.0.0 the I(resource_tags) can
- be used instead of I(tags).
- type: list
- elements: dict
- suboptions:
- key:
- description: The name of the Tag.
- type: str
- value:
- description: The value of the Tag.
- type: str
vpc_config:
description:
- The VPC config enables AWS CodeBuild to access resources in an Amazon VPC.
@@ -164,35 +145,15 @@ options:
default: 'present'
choices: ['present', 'absent']
type: str
- resource_tags:
- description:
- - A dictionary representing the tags to be applied to the build project.
- - If the I(resource_tags) parameter is not set then tags will not be modified.
- - Mutually exclusive with the I(tags) parameter.
- type: dict
- required: false
- purge_tags:
- description:
- - If I(purge_tags=true) and I(tags) is set, existing tags will be purged
- from the resource to match exactly what is defined by I(tags) parameter.
- - If the I(resource_tags) parameter is not set then tags will not be modified, even
- if I(purge_tags=True).
- - Tag keys beginning with C(aws:) are reserved by Amazon and can not be
- modified. As such they will be ignored for the purposes of the
- I(purge_tags) parameter. See the Amazon documentation for more information
- U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions).
- type: bool
- default: true
- required: false
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.boto3.modules
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags.modules
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- community.aws.codebuild_project:
@@ -200,27 +161,28 @@ EXAMPLES = r'''
description: My nice little project
service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role"
source:
- # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
- type: CODEPIPELINE
- buildspec: ''
+ # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
+ type: CODEPIPELINE
+ buildspec: ''
artifacts:
- namespaceType: NONE
- packaging: NONE
- type: CODEPIPELINE
- name: my_project
+ namespaceType: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: my_project
environment:
- computeType: BUILD_GENERAL1_SMALL
- privilegedMode: "true"
- image: "aws/codebuild/docker:17.09.0"
- type: LINUX_CONTAINER
- environmentVariables:
- - { name: 'PROFILE', value: 'staging' }
+ computeType: BUILD_GENERAL1_SMALL
+ privilegedMode: "true"
+ image: "aws/codebuild/docker:17.09.0"
+ type: LINUX_CONTAINER
+ environmentVariables:
+ - name: 'PROFILE'
+ value: 'staging'
encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3"
region: us-east-1
state: present
-'''
+"""
-RETURN = r'''
+RETURN = r"""
project:
description: Returns the dictionary describing the code project configuration.
returned: success
@@ -324,118 +286,162 @@ project:
returned: always
type: str
sample: "2018-04-17T16:56:03.245000+02:00"
-'''
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-try:
- import botocore
-except ImportError:
- pass # Handled by AnsibleAWSModule
+class CodeBuildAnsibleAWSError(AnsibleAWSError):
+ pass
-def create_or_update_project(client, params, module):
- resp = {}
- name = params['name']
- # clean up params
- formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None))
- permitted_create_params = get_boto3_client_method_parameters(client, 'create_project')
- permitted_update_params = get_boto3_client_method_parameters(client, 'update_project')
- formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params)
- formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params)
+def do_create_project(client, params, formatted_params):
+ if params["source"] is None or params["artifacts"] is None:
+ raise CodeBuildAnsibleAWSError(
+ message="The source and artifacts parameters must be provided when creating a new project. No existing project was found."
+ )
- # Check if project with that name already exists and if so update existing:
- found = describe_project(client=client, name=name, module=module)
- changed = False
+ if params["tags"] is not None:
+ formatted_params["tags"] = ansible_dict_to_boto3_tag_list(
+ params["tags"], tag_name_key_name="key", tag_value_key_name="value"
+ )
+
+ permitted_create_params = get_boto3_client_method_parameters(client, "create_project")
+ formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params)
- if 'name' in found:
- found_project = found
- found_tags = found_project.pop('tags', [])
- # Support tagging using a dict instead of the list of dicts
- if params['resource_tags'] is not None:
- if params['purge_tags']:
- tags = dict()
- else:
- tags = boto3_tag_list_to_ansible_dict(found_tags)
- tags.update(params['resource_tags'])
- formatted_update_params['tags'] = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value')
-
- resp = update_project(client=client, params=formatted_update_params, module=module)
- updated_project = resp['project']
-
- # Prep both dicts for sensible change comparison:
- found_project.pop('lastModified')
- updated_project.pop('lastModified')
- updated_tags = updated_project.pop('tags', [])
- found_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(found_tags)
- updated_project['ResourceTags'] = boto3_tag_list_to_ansible_dict(updated_tags)
-
- if updated_project != found_project:
- changed = True
- updated_project['tags'] = updated_tags
- return resp, changed
# Or create new project:
try:
- if params['source'] is None or params['artifacts'] is None:
- module.fail_json(
- "The source and artifacts parameters must be provided when "
- "creating a new project. No existing project was found.")
resp = client.create_project(**formatted_create_params)
changed = True
return resp, changed
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to create CodeBuild project")
+ raise CodeBuildAnsibleAWSError(
+ message="Unable to create CodeBuild project",
+ exception=e,
+ )
+
+
+def merge_tags(found_tags, tags, purge_tags):
+ if purge_tags:
+ return tags
+
+ merged_tags = boto3_tag_list_to_ansible_dict(found_tags)
+ merged_tags.update(tags)
+ return merged_tags
+
+
+def format_tags(tags):
+ return ansible_dict_to_boto3_tag_list(
+ tags,
+ tag_name_key_name="key",
+ tag_value_key_name="value",
+ )
+
+
+def do_update_project(client, params, formatted_params, found_project):
+ permitted_update_params = get_boto3_client_method_parameters(client, "update_project")
+ formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params)
+
+ found_tags = found_project.pop("tags", [])
+ if params["tags"] is not None:
+ formatted_update_params["tags"] = format_tags(
+ merge_tags(found_tags, params["tags"], params["purge_tags"]),
+ )
+
+ resp = update_project(client=client, params=formatted_update_params)
+ updated_project = resp["project"]
+
+ # Prep both dicts for sensible change comparison:
+ found_project.pop("lastModified")
+ updated_project.pop("lastModified")
+ updated_tags = updated_project.pop("tags", [])
+ found_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(found_tags)
+ updated_project["ResourceTags"] = boto3_tag_list_to_ansible_dict(updated_tags)
+
+ changed = updated_project != found_project
+
+ updated_project["tags"] = updated_tags
+ return resp, changed
-def update_project(client, params, module):
- name = params['name']
+def create_or_update_project(client, params):
+ resp = {}
+ name = params["name"]
+ # clean up params
+ formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None))
+
+ # Check if project with that name already exists and if so update existing:
+ found = describe_project(client=client, name=name)
+ changed = False
+
+ if "name" not in found:
+ return do_create_project(client, params, formatted_params)
+
+ return do_update_project(client, params, formatted_params, found)
+
+
+def update_project(client, params):
+ name = params["name"]
try:
resp = client.update_project(**params)
return resp
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to update CodeBuild project")
+ raise CodeBuildAnsibleAWSError(
+ message="Unable to update CodeBuild project",
+ exception=e,
+ )
-def delete_project(client, name, module):
- found = describe_project(client=client, name=name, module=module)
- changed = False
- if 'name' in found:
- # Mark as changed when a project with that name existed before calling delete
- changed = True
+def delete_project(client, name):
+ found = describe_project(client=client, name=name)
+ if "name" not in found:
+ return {}, False
+
try:
resp = client.delete_project(name=name)
- return resp, changed
+ return resp, True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to delete CodeBuild project")
+ raise CodeBuildAnsibleAWSError(
+ message="Unable to update CodeBuild project",
+ exception=e,
+ )
-def describe_project(client, name, module):
+def describe_project(client, name):
project = {}
try:
- projects = client.batch_get_projects(names=[name])['projects']
+ projects = client.batch_get_projects(names=[name])["projects"]
if len(projects) > 0:
project = projects[0]
return project
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to describe CodeBuild projects")
+ raise CodeBuildAnsibleAWSError(
+ message="Unable to describe CodeBuild projects",
+ exception=e,
+ )
def format_project_result(project_result):
formated_result = camel_dict_to_snake_dict(project_result)
- project = project_result.get('project', {})
+ project = project_result.get("project", {})
if project:
- tags = project.get('tags', [])
- formated_result['project']['resource_tags'] = boto3_tag_list_to_ansible_dict(tags)
- formated_result['ORIGINAL'] = project_result
+ tags = project.get("tags", [])
+ formated_result["project"]["resource_tags"] = boto3_tag_list_to_ansible_dict(tags)
+ formated_result["ORIGINAL"] = project_result
return formated_result
@@ -443,46 +449,44 @@ def main():
argument_spec = dict(
name=dict(required=True),
description=dict(),
- source=dict(type='dict'),
- artifacts=dict(type='dict'),
- cache=dict(type='dict'),
- environment=dict(type='dict'),
+ source=dict(type="dict"),
+ artifacts=dict(type="dict"),
+ cache=dict(type="dict"),
+ environment=dict(type="dict"),
service_role=dict(),
- timeout_in_minutes=dict(type='int', default=60),
+ timeout_in_minutes=dict(type="int", default=60),
encryption_key=dict(no_log=False),
- tags=dict(type='list', elements='dict'),
- resource_tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=True),
- vpc_config=dict(type='dict'),
- state=dict(choices=['present', 'absent'], default='present')
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ vpc_config=dict(type="dict"),
+ state=dict(choices=["present", "absent"], default="present"),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
- client_conn = module.client('codebuild')
+ client_conn = module.client("codebuild")
- state = module.params.get('state')
+ state = module.params.get("state")
changed = False
- if module.params['tags']:
- module.deprecate(
- 'The tags parameter currently uses a non-standard format and has '
- 'been deprecated. In release 6.0.0 this paramater will accept '
- 'a simple key/value pair dictionary instead of the current list '
- 'of dictionaries. It is recommended to migrate to using the '
- 'resource_tags parameter which already accepts the simple dictionary '
- 'format.', version='6.0.0', collection_name='community.aws')
-
- if state == 'present':
- project_result, changed = create_or_update_project(
- client=client_conn,
- params=module.params,
- module=module)
- elif state == 'absent':
- project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module)
+ try:
+ if state == "present":
+ project_result, changed = create_or_update_project(
+ client=client_conn,
+ params=module.params,
+ )
+ elif state == "absent":
+ project_result, changed = delete_project(
+ client=client_conn,
+ name=module.params["name"],
+ )
+ except CodeBuildAnsibleAWSError as e:
+ if e.exception:
+ module.fail_json_aws(e.exception, msg=e.message)
+ module.fail_json(msg=e.message)
formatted_result = format_project_result(project_result)
module.exit_json(changed=changed, **formatted_result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
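
The codebuild_project hunks above replace scattered fail_json_aws() calls with a single exception type that is raised inside the helpers and translated exactly once in main(). A minimal, self-contained sketch of that pattern, assuming stub client and module objects (only CodeBuildAnsibleAWSError's shape is taken from the hunks; the stand-in RuntimeError and the helper names are illustrative assumptions):

class CodeBuildAnsibleAWSError(Exception):
    # Mirrors the attributes used above: a human-readable message plus the
    # original botocore exception, so the caller can choose between
    # fail_json_aws() and fail_json().
    def __init__(self, message=None, exception=None):
        super().__init__(message)
        self.message = message
        self.exception = exception


def delete_project(client, name):
    # Helpers raise the domain exception instead of failing the module directly.
    try:
        return client.delete_project(name=name), True
    except RuntimeError as e:  # stand-in for botocore ClientError/BotoCoreError
        raise CodeBuildAnsibleAWSError(message="Unable to delete CodeBuild project", exception=e)


def run(module, client):
    # The entry point is the single place where the exception becomes a module failure.
    try:
        result, changed = delete_project(client, module.params["name"])
    except CodeBuildAnsibleAWSError as e:
        if e.exception:
            module.fail_json_aws(e.exception, msg=e.message)
        module.fail_json(msg=e.message)
    module.exit_json(changed=changed, **result)
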
diff --git a/ansible_collections/community/aws/plugins/modules/codecommit_repository.py b/ansible_collections/community/aws/plugins/modules/codecommit_repository.py
index fce4d15d6..14b08bd88 100644
--- a/ansible_collections/community/aws/plugins/modules/codecommit_repository.py
+++ b/ansible_collections/community/aws/plugins/modules/codecommit_repository.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Shuang Wang <ooocamel@icloud.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: codecommit_repository
version_added: 1.0.0
@@ -17,7 +14,8 @@ description:
- See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit.
- Prior to release 5.0.0 this module was called C(community.aws.aws_codecommit).
The usage did not change.
-author: Shuang Wang (@ptux)
+author:
+ - Shuang Wang (@ptux)
options:
name:
description:
@@ -39,12 +37,12 @@ options:
choices: [ 'present', 'absent' ]
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-RETURN = '''
+RETURN = r"""
repository_metadata:
description: "Information about the repository."
returned: always
@@ -120,9 +118,9 @@ response_metadata:
returned: always
type: str
sample: "0"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Create a new repository
- community.aws.codecommit_repository:
name: repo
@@ -132,53 +130,54 @@ EXAMPLES = '''
- community.aws.codecommit_repository:
name: repo
state: absent
-'''
+"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class CodeCommit(object):
def __init__(self, module=None):
self._module = module
- self._client = self._module.client('codecommit')
+ self._client = self._module.client("codecommit")
self._check_mode = self._module.check_mode
def process(self):
result = dict(changed=False)
- if self._module.params['state'] == 'present':
+ if self._module.params["state"] == "present":
if not self._repository_exists():
if not self._check_mode:
result = self._create_repository()
- result['changed'] = True
+ result["changed"] = True
else:
- metadata = self._get_repository()['repositoryMetadata']
- if not metadata.get('repositoryDescription'):
- metadata['repositoryDescription'] = ''
- if metadata['repositoryDescription'] != self._module.params['description']:
+ metadata = self._get_repository()["repositoryMetadata"]
+ if not metadata.get("repositoryDescription"):
+ metadata["repositoryDescription"] = ""
+ if metadata["repositoryDescription"] != self._module.params["description"]:
if not self._check_mode:
self._update_repository()
- result['changed'] = True
+ result["changed"] = True
result.update(self._get_repository())
- if self._module.params['state'] == 'absent' and self._repository_exists():
+ if self._module.params["state"] == "absent" and self._repository_exists():
if not self._check_mode:
result = self._delete_repository()
- result['changed'] = True
+ result["changed"] = True
return result
def _repository_exists(self):
try:
- paginator = self._client.get_paginator('list_repositories')
+ paginator = self._client.get_paginator("list_repositories")
for page in paginator.paginate():
- repositories = page['repositories']
+ repositories = page["repositories"]
for item in repositories:
- if self._module.params['name'] in item.values():
+ if self._module.params["name"] in item.values():
return True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't get repository")
@@ -187,7 +186,7 @@ class CodeCommit(object):
def _get_repository(self):
try:
result = self._client.get_repository(
- repositoryName=self._module.params['name']
+ repositoryName=self._module.params["name"],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't get repository")
@@ -196,8 +195,8 @@ class CodeCommit(object):
def _update_repository(self):
try:
result = self._client.update_repository_description(
- repositoryName=self._module.params['name'],
- repositoryDescription=self._module.params['description']
+ repositoryName=self._module.params["name"],
+ repositoryDescription=self._module.params["description"],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't create repository")
@@ -206,8 +205,8 @@ class CodeCommit(object):
def _create_repository(self):
try:
result = self._client.create_repository(
- repositoryName=self._module.params['name'],
- repositoryDescription=self._module.params['description']
+ repositoryName=self._module.params["name"],
+ repositoryDescription=self._module.params["description"],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't create repository")
@@ -216,7 +215,7 @@ class CodeCommit(object):
def _delete_repository(self):
try:
result = self._client.delete_repository(
- repositoryName=self._module.params['name']
+ repositoryName=self._module.params["name"],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't delete repository")
@@ -226,13 +225,13 @@ class CodeCommit(object):
def main():
argument_spec = dict(
name=dict(required=True),
- state=dict(choices=['present', 'absent'], required=True),
- description=dict(default='', aliases=['comment'])
+ state=dict(choices=["present", "absent"], required=True),
+ description=dict(default="", aliases=["comment"]),
)
ansible_aws_module = AnsibleAWSModule(
argument_spec=argument_spec,
- supports_check_mode=True
+ supports_check_mode=True,
)
aws_codecommit = CodeCommit(module=ansible_aws_module)
@@ -240,5 +239,5 @@ def main():
ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
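
The _repository_exists() helper above has to page through list_repositories before it can conclude a repository is absent. A minimal sketch of that paginator loop, assuming ordinary boto3 credentials (boto3 and the commented usage are the only assumptions; the response shape matches the hunks):

import boto3

def repository_exists(client, name):
    # list_repositories is paginated, so every page must be scanned before
    # concluding the repository does not exist.
    paginator = client.get_paginator("list_repositories")
    for page in paginator.paginate():
        for item in page["repositories"]:
            # Each entry carries repositoryName and repositoryId; matching on
            # item.values() mirrors the module's check.
            if name in item.values():
                return True
    return False

# Usage (requires AWS credentials):
# client = boto3.client("codecommit")
# repository_exists(client, "repo")
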
diff --git a/ansible_collections/community/aws/plugins/modules/codepipeline.py b/ansible_collections/community/aws/plugins/modules/codepipeline.py
index 5c5935cb9..b1fe60476 100644
--- a/ansible_collections/community/aws/plugins/modules/codepipeline.py
+++ b/ansible_collections/community/aws/plugins/modules/codepipeline.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: codepipeline
version_added: 1.0.0
@@ -75,16 +72,16 @@ options:
choices: ['present', 'absent']
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container)
-- community.aws.aws_codepipeline:
+- community.aws.codepipeline:
name: my_deploy_pipeline
role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service
artifact_store:
@@ -147,9 +144,9 @@ EXAMPLES = r'''
FileName: imagedefinitions.json
region: us-east-1
state: present
-'''
+"""
-RETURN = r'''
+RETURN = r"""
pipeline:
description: Returns the dictionary describing the CodePipeline configuration.
returned: success
@@ -194,7 +191,7 @@ pipeline:
- This number is auto incremented when CodePipeline params are changed.
returned: always
type: int
-'''
+"""
import copy
@@ -205,20 +202,21 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def create_pipeline(client, name, role_arn, artifact_store, stages, version, module):
- pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages}
+ pipeline_dict = {"name": name, "roleArn": role_arn, "artifactStore": artifact_store, "stages": stages}
if version:
- pipeline_dict['version'] = version
+ pipeline_dict["version"] = version
try:
resp = client.create_pipeline(pipeline=pipeline_dict)
return resp
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict['name']))
+ module.fail_json_aws(e, msg=f"Unable create pipeline {pipeline_dict['name']}")
def update_pipeline(client, pipeline_dict, module):
@@ -226,7 +224,7 @@ def update_pipeline(client, pipeline_dict, module):
resp = client.update_pipeline(pipeline=pipeline_dict)
return resp
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict['name']))
+ module.fail_json_aws(e, msg=f"Unable update pipeline {pipeline_dict['name']}")
def delete_pipeline(client, name, module):
@@ -234,7 +232,7 @@ def delete_pipeline(client, name, module):
resp = client.delete_pipeline(name=name)
return resp
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable delete pipeline {0}".format(name))
+ module.fail_json_aws(e, msg=f"Unable delete pipeline {name}")
def describe_pipeline(client, name, version, module):
@@ -246,63 +244,69 @@ def describe_pipeline(client, name, version, module):
else:
pipeline = client.get_pipeline(name=name)
return pipeline
- except is_boto3_error_code('PipelineNotFoundException'):
+ except is_boto3_error_code("PipelineNotFoundException"):
return pipeline
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
def main():
argument_spec = dict(
- name=dict(required=True, type='str'),
- role_arn=dict(required=True, type='str'),
- artifact_store=dict(required=True, type='dict'),
- stages=dict(required=True, type='list', elements='dict'),
- version=dict(type='int'),
- state=dict(choices=['present', 'absent'], default='present')
+ name=dict(required=True, type="str"),
+ role_arn=dict(required=True, type="str"),
+ artifact_store=dict(required=True, type="dict"),
+ stages=dict(required=True, type="list", elements="dict"),
+ version=dict(type="int"),
+ state=dict(choices=["present", "absent"], default="present"),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
- client_conn = module.client('codepipeline')
+ client_conn = module.client("codepipeline")
- state = module.params.get('state')
+ state = module.params.get("state")
changed = False
# Determine if the CodePipeline exists
- found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module)
+ found_code_pipeline = describe_pipeline(
+ client=client_conn, name=module.params["name"], version=module.params["version"], module=module
+ )
pipeline_result = {}
- if state == 'present':
- if 'pipeline' in found_code_pipeline:
- pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline'])
+ if state == "present":
+ if "pipeline" in found_code_pipeline:
+ pipeline_dict = copy.deepcopy(found_code_pipeline["pipeline"])
# Update dictionary with provided module params:
- pipeline_dict['roleArn'] = module.params['role_arn']
- pipeline_dict['artifactStore'] = module.params['artifact_store']
- pipeline_dict['stages'] = module.params['stages']
- if module.params['version'] is not None:
- pipeline_dict['version'] = module.params['version']
+ pipeline_dict["roleArn"] = module.params["role_arn"]
+ pipeline_dict["artifactStore"] = module.params["artifact_store"]
+ pipeline_dict["stages"] = module.params["stages"]
+ if module.params["version"] is not None:
+ pipeline_dict["version"] = module.params["version"]
pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module)
- if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']):
+ if compare_policies(found_code_pipeline["pipeline"], pipeline_result["pipeline"]):
changed = True
else:
pipeline_result = create_pipeline(
client=client_conn,
- name=module.params['name'],
- role_arn=module.params['role_arn'],
- artifact_store=module.params['artifact_store'],
- stages=module.params['stages'],
- version=module.params['version'],
- module=module)
+ name=module.params["name"],
+ role_arn=module.params["role_arn"],
+ artifact_store=module.params["artifact_store"],
+ stages=module.params["stages"],
+ version=module.params["version"],
+ module=module,
+ )
changed = True
- elif state == 'absent':
+ elif state == "absent":
if found_code_pipeline:
- pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module)
+ pipeline_result = delete_pipeline(client=client_conn, name=module.params["name"], module=module)
changed = True
module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
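
describe_pipeline() above swallows only PipelineNotFoundException (via is_boto3_error_code) and lets every other error reach the failure path. A simplified stand-in for that distinction, written against botocore directly rather than the collection's helper (using plain ClientError inspection is an assumption for illustration):

import botocore.exceptions

def describe_pipeline(client, name):
    # Only the "no such pipeline" code means "not found"; anything else is a
    # genuine failure and propagates (the module calls fail_json_aws there).
    try:
        return client.get_pipeline(name=name)
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] == "PipelineNotFoundException":
            return {}
        raise
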
diff --git a/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py b/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py
index 7b92abb7f..903d5a5e1 100644
--- a/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py
+++ b/ansible_collections/community/aws/plugins/modules/config_aggregation_authorization.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: config_aggregation_authorization
version_added: 1.0.0
@@ -36,12 +33,12 @@ options:
type: str
required: true
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get current account ID
community.aws.aws_caller_info:
register: whoami
@@ -49,26 +46,26 @@ EXAMPLES = '''
state: present
authorized_account_id: '{{ whoami.account }}'
authorized_aws_region: us-east-1
-'''
-
-RETURN = '''#'''
+"""
+RETURN = r"""#"""
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def resource_exists(client, module, params):
try:
- current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
+ current_authorizations = client.describe_aggregation_authorizations()["AggregationAuthorizations"]
authorization_exists = next(
- (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
- None
+ (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]),
+ None,
)
if authorization_exists:
return True
@@ -79,32 +76,32 @@ def resource_exists(client, module, params):
def create_resource(client, module, params, result):
try:
response = client.put_aggregation_authorization(
- AuthorizedAccountId=params['AuthorizedAccountId'],
- AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+ AuthorizedAccountId=params["AuthorizedAccountId"],
+ AuthorizedAwsRegion=params["AuthorizedAwsRegion"],
)
- result['changed'] = True
+ result["changed"] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
def update_resource(client, module, params, result):
- current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
+ current_authorizations = client.describe_aggregation_authorizations()["AggregationAuthorizations"]
current_params = next(
- (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
- None
+ (item for item in current_authorizations if item["AuthorizedAccountId"] == params["AuthorizedAccountId"]),
+ None,
)
- del current_params['AggregationAuthorizationArn']
- del current_params['CreationTime']
+ del current_params["AggregationAuthorizationArn"]
+ del current_params["CreationTime"]
if params != current_params:
try:
response = client.put_aggregation_authorization(
- AuthorizedAccountId=params['AuthorizedAccountId'],
- AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+ AuthorizedAccountId=params["AuthorizedAccountId"],
+ AuthorizedAwsRegion=params["AuthorizedAwsRegion"],
)
- result['changed'] = True
+ result["changed"] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
@@ -113,10 +110,10 @@ def update_resource(client, module, params, result):
def delete_resource(client, module, params, result):
try:
response = client.delete_aggregation_authorization(
- AuthorizedAccountId=params['AuthorizedAccountId'],
- AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+ AuthorizedAccountId=params["AuthorizedAccountId"],
+ AuthorizedAwsRegion=params["AuthorizedAwsRegion"],
)
- result['changed'] = True
+ result["changed"] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization")
@@ -125,35 +122,35 @@ def delete_resource(client, module, params, result):
def main():
module = AnsibleAWSModule(
argument_spec={
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 'authorized_account_id': dict(type='str', required=True),
- 'authorized_aws_region': dict(type='str', required=True),
+ "state": dict(type="str", choices=["present", "absent"], default="present"),
+ "authorized_account_id": dict(type="str", required=True),
+ "authorized_aws_region": dict(type="str", required=True),
},
supports_check_mode=False,
)
- result = {'changed': False}
+ result = {"changed": False}
params = {
- 'AuthorizedAccountId': module.params.get('authorized_account_id'),
- 'AuthorizedAwsRegion': module.params.get('authorized_aws_region'),
+ "AuthorizedAccountId": module.params.get("authorized_account_id"),
+ "AuthorizedAwsRegion": module.params.get("authorized_aws_region"),
}
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("config", retry_decorator=AWSRetry.jittered_backoff())
resource_status = resource_exists(client, module, params)
- if module.params.get('state') == 'present':
+ if module.params.get("state") == "present":
if not resource_status:
create_resource(client, module, params, result)
else:
update_resource(client, module, params, result)
- if module.params.get('state') == 'absent':
+ if module.params.get("state") == "absent":
if resource_status:
delete_resource(client, module, params, result)
- module.exit_json(changed=result['changed'])
+ module.exit_json(changed=result["changed"])
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
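
resource_exists() above finds an already-present authorization with next() over a generator expression, defaulting to None. A tiny runnable sketch of that lookup (the sample records are hypothetical):

# Records as returned by describe_aggregation_authorizations() (hypothetical).
current_authorizations = [
    {"AuthorizedAccountId": "123456789012", "AuthorizedAwsRegion": "us-east-1"},
    {"AuthorizedAccountId": "210987654321", "AuthorizedAwsRegion": "eu-west-1"},
]

wanted = "123456789012"
# next() yields the first matching record, or None when nothing matches.
match = next(
    (item for item in current_authorizations if item["AuthorizedAccountId"] == wanted),
    None,
)
print(match is not None)  # True: the authorization already exists
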
diff --git a/ansible_collections/community/aws/plugins/modules/config_aggregator.py b/ansible_collections/community/aws/plugins/modules/config_aggregator.py
index 3dc4c6faa..48771080b 100644
--- a/ansible_collections/community/aws/plugins/modules/config_aggregator.py
+++ b/ansible_collections/community/aws/plugins/modules/config_aggregator.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: config_aggregator
version_added: 1.0.0
@@ -71,25 +68,25 @@ options:
type: dict
required: true
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create cross-account aggregator
community.aws.config_aggregator:
name: test_config_rule
state: present
account_sources:
account_ids:
- - 1234567890
- - 0123456789
- - 9012345678
+ - 1234567890
+ - 0123456789
+ - 9012345678
all_aws_regions: true
-'''
+"""
-RETURN = r'''#'''
+RETURN = r"""#"""
try:
@@ -97,57 +94,64 @@ try:
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def resource_exists(client, module, params):
try:
aggregator = client.describe_configuration_aggregators(
- ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
+ ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]]
)
- return aggregator['ConfigurationAggregators'][0]
- except is_boto3_error_code('NoSuchConfigurationAggregatorException'):
+ return aggregator["ConfigurationAggregators"][0]
+ except is_boto3_error_code("NoSuchConfigurationAggregatorException"):
return
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
def create_resource(client, module, params, result):
try:
client.put_configuration_aggregator(
- ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
- AccountAggregationSources=params['AccountAggregationSources'],
- OrganizationAggregationSource=params['OrganizationAggregationSource']
+ ConfigurationAggregatorName=params["ConfigurationAggregatorName"],
+ AccountAggregationSources=params["AccountAggregationSources"],
+ OrganizationAggregationSource=params["OrganizationAggregationSource"],
)
- result['changed'] = True
- result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ result["changed"] = True
+ result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
def update_resource(client, module, params, result):
- result['changed'] = False
+ result["changed"] = False
current_params = client.describe_configuration_aggregators(
- ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
- )['ConfigurationAggregators'][0]
+ ConfigurationAggregatorNames=[params["ConfigurationAggregatorName"]]
+ )["ConfigurationAggregators"][0]
- if params['AccountAggregationSources'] != current_params.get('AccountAggregationSources', []):
- result['changed'] = True
+ if params["AccountAggregationSources"] != current_params.get("AccountAggregationSources", []):
+ result["changed"] = True
- if params['OrganizationAggregationSource'] != current_params.get('OrganizationAggregationSource', {}):
- result['changed'] = True
+ if params["OrganizationAggregationSource"] != current_params.get("OrganizationAggregationSource", {}):
+ result["changed"] = True
- if result['changed']:
+ if result["changed"]:
try:
client.put_configuration_aggregator(
- ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
- AccountAggregationSources=params['AccountAggregationSources'],
- OrganizationAggregationSource=params['OrganizationAggregationSource']
+ ConfigurationAggregatorName=params["ConfigurationAggregatorName"],
+ AccountAggregationSources=params["AccountAggregationSources"],
+ OrganizationAggregationSource=params["OrganizationAggregationSource"],
)
- result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ result["aggregator"] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
@@ -155,10 +159,8 @@ def update_resource(client, module, params, result):
def delete_resource(client, module, params, result):
try:
- client.delete_configuration_aggregator(
- ConfigurationAggregatorName=params['ConfigurationAggregatorName']
- )
- result['changed'] = True
+ client.delete_configuration_aggregator(ConfigurationAggregatorName=params["ConfigurationAggregatorName"])
+ result["changed"] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator")
@@ -167,66 +169,64 @@ def delete_resource(client, module, params, result):
def main():
module = AnsibleAWSModule(
argument_spec={
- 'name': dict(type='str', required=True),
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 'account_sources': dict(type='list', required=True, elements='dict'),
- 'organization_source': dict(type='dict', required=True)
+ "name": dict(type="str", required=True),
+ "state": dict(type="str", choices=["present", "absent"], default="present"),
+ "account_sources": dict(type="list", required=True, elements="dict"),
+ "organization_source": dict(type="dict", required=True),
},
supports_check_mode=False,
)
- result = {
- 'changed': False
- }
+ result = {"changed": False}
- name = module.params.get('name')
- state = module.params.get('state')
+ name = module.params.get("name")
+ state = module.params.get("state")
params = {}
if name:
- params['ConfigurationAggregatorName'] = name
- params['AccountAggregationSources'] = []
- if module.params.get('account_sources'):
- for i in module.params.get('account_sources'):
+ params["ConfigurationAggregatorName"] = name
+ params["AccountAggregationSources"] = []
+ if module.params.get("account_sources"):
+ for i in module.params.get("account_sources"):
tmp_dict = {}
- if i.get('account_ids'):
- tmp_dict['AccountIds'] = i.get('account_ids')
- if i.get('aws_regions'):
- tmp_dict['AwsRegions'] = i.get('aws_regions')
- if i.get('all_aws_regions') is not None:
- tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
- params['AccountAggregationSources'].append(tmp_dict)
- if module.params.get('organization_source'):
- params['OrganizationAggregationSource'] = {}
- if module.params.get('organization_source').get('role_arn'):
- params['OrganizationAggregationSource'].update({
- 'RoleArn': module.params.get('organization_source').get('role_arn')
- })
- if module.params.get('organization_source').get('aws_regions'):
- params['OrganizationAggregationSource'].update({
- 'AwsRegions': module.params.get('organization_source').get('aws_regions')
- })
- if module.params.get('organization_source').get('all_aws_regions') is not None:
- params['OrganizationAggregationSource'].update({
- 'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
- })
-
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+ if i.get("account_ids"):
+ tmp_dict["AccountIds"] = i.get("account_ids")
+ if i.get("aws_regions"):
+ tmp_dict["AwsRegions"] = i.get("aws_regions")
+ if i.get("all_aws_regions") is not None:
+ tmp_dict["AllAwsRegions"] = i.get("all_aws_regions")
+ params["AccountAggregationSources"].append(tmp_dict)
+ if module.params.get("organization_source"):
+ params["OrganizationAggregationSource"] = {}
+ if module.params.get("organization_source").get("role_arn"):
+ params["OrganizationAggregationSource"].update(
+ {"RoleArn": module.params.get("organization_source").get("role_arn")}
+ )
+ if module.params.get("organization_source").get("aws_regions"):
+ params["OrganizationAggregationSource"].update(
+ {"AwsRegions": module.params.get("organization_source").get("aws_regions")}
+ )
+ if module.params.get("organization_source").get("all_aws_regions") is not None:
+ params["OrganizationAggregationSource"].update(
+ {"AllAwsRegions": module.params.get("organization_source").get("all_aws_regions")}
+ )
+
+ client = module.client("config", retry_decorator=AWSRetry.jittered_backoff())
resource_status = resource_exists(client, module, params)
- if state == 'present':
+ if state == "present":
if not resource_status:
create_resource(client, module, params, result)
else:
update_resource(client, module, params, result)
- if state == 'absent':
+ if state == "absent":
if resource_status:
delete_resource(client, module, params, result)
- module.exit_json(changed=result['changed'])
+ module.exit_json(changed=result["changed"])
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
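
main() above translates the snake_case account_sources option into the CamelCase AccountAggregationSources structure the Config API expects, key by key. The same mapping extracted into a small runnable function (the helper name and sample input are assumptions):

def build_account_sources(account_sources):
    # Copy only the keys the caller actually set, renaming them to the
    # CamelCase fields put_configuration_aggregator() expects.
    out = []
    for source in account_sources:
        tmp = {}
        if source.get("account_ids"):
            tmp["AccountIds"] = source["account_ids"]
        if source.get("aws_regions"):
            tmp["AwsRegions"] = source["aws_regions"]
        if source.get("all_aws_regions") is not None:
            tmp["AllAwsRegions"] = source["all_aws_regions"]
        out.append(tmp)
    return out

print(build_account_sources([{"account_ids": ["1234567890"], "all_aws_regions": True}]))
# [{'AccountIds': ['1234567890'], 'AllAwsRegions': True}]
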
diff --git a/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py b/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py
index 371bd6685..1c3a3acdc 100644
--- a/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py
+++ b/ansible_collections/community/aws/plugins/modules/config_delivery_channel.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: config_delivery_channel
version_added: 1.0.0
@@ -39,6 +36,10 @@ options:
description:
- The prefix for the specified Amazon S3 bucket.
type: str
+ kms_key_arn:
+ description:
+ - The ARN of a KMS key used to encrypt objects delivered by Config. The key must belong to the same region as the destination S3 bucket.
+ type: str
sns_topic_arn:
description:
- The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.
@@ -49,22 +50,31 @@ options:
choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
+
+EXAMPLES = r"""
+- name: Create a delivery channel for AWS Config
+ community.aws.config_delivery_channel:
+ name: test_delivery_channel
+ state: present
+ s3_bucket: 'test_aws_config_bucket'
+ sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no'
+ delivery_frequency: 'Twelve_Hours'
-EXAMPLES = '''
-- name: Create Delivery Channel for AWS Config
+- name: Create a delivery channel with encrypted objects
community.aws.config_delivery_channel:
name: test_delivery_channel
state: present
s3_bucket: 'test_aws_config_bucket'
+ kms_key_arn: 'arn:aws:kms:us-east-1:123456789012:key/160f41cb-e660-4fa0-8bf6-976f53bf7851'
sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no'
delivery_frequency: 'Twelve_Hours'
-'''
+"""
-RETURN = '''#'''
+RETURN = r"""#"""
try:
@@ -74,28 +84,31 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
# this waits for an IAM role to become fully available, at the cost of
# taking a long time to fail when the IAM role/policy really is invalid
retry_unavailable_iam_on_put_delivery = AWSRetry.jittered_backoff(
- catch_extra_error_codes=['InsufficientDeliveryPolicyException'],
+ catch_extra_error_codes=["InsufficientDeliveryPolicyException"],
)
def resource_exists(client, module, params):
try:
channel = client.describe_delivery_channels(
- DeliveryChannelNames=[params['name']],
+ DeliveryChannelNames=[params["name"]],
aws_retry=True,
)
- return channel['DeliveryChannels'][0]
- except is_boto3_error_code('NoSuchDeliveryChannelException'):
+ return channel["DeliveryChannels"][0]
+ except is_boto3_error_code("NoSuchDeliveryChannelException"):
return
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
@@ -106,49 +119,63 @@ def create_resource(client, module, params, result):
)(
DeliveryChannel=params,
)
- result['changed'] = True
- result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ result["changed"] = True
+ result["channel"] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
- except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
- module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
- except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
- "Make sure the bucket exists and is available")
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
+ except is_boto3_error_code("InvalidS3KeyPrefixException") as e:
+ module.fail_json_aws(
+ e,
+ msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix",
+ )
+ except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e,
+ msg="The `s3_prefix` or `s3_bucket` parameter is invalid. Make sure the bucket exists and is available",
+ )
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e,
+ msg="Couldn't create AWS Config delivery channel",
+ )
def update_resource(client, module, params, result):
current_params = client.describe_delivery_channels(
- DeliveryChannelNames=[params['name']],
+ DeliveryChannelNames=[params["name"]],
aws_retry=True,
)
- if params != current_params['DeliveryChannels'][0]:
+ if params != current_params["DeliveryChannels"][0]:
try:
retry_unavailable_iam_on_put_delivery(
client.put_delivery_channel,
)(
DeliveryChannel=params,
)
- result['changed'] = True
- result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ result["changed"] = True
+ result["channel"] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
- except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
+ except is_boto3_error_code("InvalidS3KeyPrefixException") as e:
module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
- except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
- "Make sure the bucket exists and is available")
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e,
+ msg="The `s3_prefix` or `s3_bucket` parameter is invalid. Make sure the bucket exists and is available",
+ )
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
def delete_resource(client, module, params, result):
try:
- response = client.delete_delivery_channel(
- DeliveryChannelName=params['name']
- )
- result['changed'] = True
+ response = client.delete_delivery_channel(DeliveryChannelName=params["name"])
+ result["changed"] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel")
@@ -157,62 +184,61 @@ def delete_resource(client, module, params, result):
def main():
module = AnsibleAWSModule(
argument_spec={
- 'name': dict(type='str', required=True),
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 's3_bucket': dict(type='str', required=True),
- 's3_prefix': dict(type='str'),
- 'sns_topic_arn': dict(type='str'),
- 'delivery_frequency': dict(
- type='str',
+ "name": dict(type="str", required=True),
+ "state": dict(type="str", choices=["present", "absent"], default="present"),
+ "s3_bucket": dict(type="str", required=True),
+ "s3_prefix": dict(type="str"),
+ "kms_key_arn": dict(type="str", no_log=True),
+ "sns_topic_arn": dict(type="str"),
+ "delivery_frequency": dict(
+ type="str",
choices=[
- 'One_Hour',
- 'Three_Hours',
- 'Six_Hours',
- 'Twelve_Hours',
- 'TwentyFour_Hours'
- ]
+ "One_Hour",
+ "Three_Hours",
+ "Six_Hours",
+ "Twelve_Hours",
+ "TwentyFour_Hours",
+ ],
),
},
supports_check_mode=False,
)
- result = {
- 'changed': False
- }
+ result = {"changed": False}
- name = module.params.get('name')
- state = module.params.get('state')
+ name = module.params.get("name")
+ state = module.params.get("state")
params = {}
if name:
- params['name'] = name
- if module.params.get('s3_bucket'):
- params['s3BucketName'] = module.params.get('s3_bucket')
- if module.params.get('s3_prefix'):
- params['s3KeyPrefix'] = module.params.get('s3_prefix')
- if module.params.get('sns_topic_arn'):
- params['snsTopicARN'] = module.params.get('sns_topic_arn')
- if module.params.get('delivery_frequency'):
- params['configSnapshotDeliveryProperties'] = {
- 'deliveryFrequency': module.params.get('delivery_frequency')
- }
-
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+ params["name"] = name
+ if module.params.get("s3_bucket"):
+ params["s3BucketName"] = module.params.get("s3_bucket")
+ if module.params.get("s3_prefix"):
+ params["s3KeyPrefix"] = module.params.get("s3_prefix")
+ if module.params.get("kms_key_arn"):
+ params["s3KmsKeyArn"] = module.params.get("kms_key_arn")
+ if module.params.get("sns_topic_arn"):
+ params["snsTopicARN"] = module.params.get("sns_topic_arn")
+ if module.params.get("delivery_frequency"):
+ params["configSnapshotDeliveryProperties"] = {"deliveryFrequency": module.params.get("delivery_frequency")}
+
+ client = module.client("config", retry_decorator=AWSRetry.jittered_backoff())
resource_status = resource_exists(client, module, params)
- if state == 'present':
+ if state == "present":
if not resource_status:
create_resource(client, module, params, result)
if resource_status:
update_resource(client, module, params, result)
- if state == 'absent':
+ if state == "absent":
if resource_status:
delete_resource(client, module, params, result)
module.exit_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
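
The retry_unavailable_iam_on_put_delivery wrapper above retries put_delivery_channel while IAM propagation still raises InsufficientDeliveryPolicyException. A stand-in sketch of that jittered-backoff idea, not the collection's AWSRetry implementation (retry counts and sleep times are illustrative assumptions):

import random
import time

import botocore.exceptions

def retry_on_codes(codes, retries=5, base=1.0):
    # Retry the wrapped call only while the failure is one of the listed,
    # expected-to-clear error codes; re-raise everything else immediately.
    def wrap(fn):
        def inner(**kwargs):
            for attempt in range(retries):
                try:
                    return fn(**kwargs)
                except botocore.exceptions.ClientError as e:
                    code = e.response["Error"]["Code"]
                    if code not in codes or attempt == retries - 1:
                        raise
                    # Full-jitter backoff before the next attempt.
                    time.sleep(random.uniform(0, base * 2 ** attempt))
        return inner
    return wrap

# Usage mirrors the module:
# retry_on_codes(["InsufficientDeliveryPolicyException"])(client.put_delivery_channel)(DeliveryChannel=params)
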
diff --git a/ansible_collections/community/aws/plugins/modules/config_recorder.py b/ansible_collections/community/aws/plugins/modules/config_recorder.py
index d90ce46cd..510bbaa23 100644
--- a/ansible_collections/community/aws/plugins/modules/config_recorder.py
+++ b/ansible_collections/community/aws/plugins/modules/config_recorder.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: config_recorder
version_added: 1.0.0
@@ -62,23 +59,23 @@ options:
- Before you can set this option, you must set I(all_supported=false).
type: dict
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create Configuration Recorder for AWS Config
community.aws.config_recorder:
name: test_configuration_recorder
state: present
role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
recording_group:
- all_supported: true
- include_global_types: true
-'''
+ all_supported: true
+ include_global_types: true
+"""
-RETURN = '''#'''
+RETURN = r"""#"""
try:
@@ -88,47 +85,43 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def resource_exists(client, module, params):
try:
- recorder = client.describe_configuration_recorders(
- ConfigurationRecorderNames=[params['name']]
- )
- return recorder['ConfigurationRecorders'][0]
- except is_boto3_error_code('NoSuchConfigurationRecorderException'):
+ recorder = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]])
+ return recorder["ConfigurationRecorders"][0]
+ except is_boto3_error_code("NoSuchConfigurationRecorderException"):
return
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
def create_resource(client, module, params, result):
try:
- response = client.put_configuration_recorder(
- ConfigurationRecorder=params
- )
- result['changed'] = True
- result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ response = client.put_configuration_recorder(ConfigurationRecorder=params)
+ result["changed"] = True
+ result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder")
def update_resource(client, module, params, result):
- current_params = client.describe_configuration_recorders(
- ConfigurationRecorderNames=[params['name']]
- )
+ current_params = client.describe_configuration_recorders(ConfigurationRecorderNames=[params["name"]])
- if params != current_params['ConfigurationRecorders'][0]:
+ if params != current_params["ConfigurationRecorders"][0]:
try:
- response = client.put_configuration_recorder(
- ConfigurationRecorder=params
- )
- result['changed'] = True
- result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+ response = client.put_configuration_recorder(ConfigurationRecorder=params)
+ result["changed"] = True
+ result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder")
@@ -136,77 +129,68 @@ def update_resource(client, module, params, result):
def delete_resource(client, module, params, result):
try:
- response = client.delete_configuration_recorder(
- ConfigurationRecorderName=params['name']
- )
- result['changed'] = True
+ response = client.delete_configuration_recorder(ConfigurationRecorderName=params["name"])
+ result["changed"] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder")
def main():
-
module = AnsibleAWSModule(
argument_spec={
- 'name': dict(type='str', required=True),
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 'role_arn': dict(type='str'),
- 'recording_group': dict(type='dict'),
+ "name": dict(type="str", required=True),
+ "state": dict(type="str", choices=["present", "absent"], default="present"),
+ "role_arn": dict(type="str"),
+ "recording_group": dict(type="dict"),
},
supports_check_mode=False,
required_if=[
- ('state', 'present', ['role_arn', 'recording_group']),
+ ("state", "present", ["role_arn", "recording_group"]),
],
)
- result = {
- 'changed': False
- }
+ result = {"changed": False}
- name = module.params.get('name')
- state = module.params.get('state')
+ name = module.params.get("name")
+ state = module.params.get("state")
params = {}
if name:
- params['name'] = name
- if module.params.get('role_arn'):
- params['roleARN'] = module.params.get('role_arn')
- if module.params.get('recording_group'):
- params['recordingGroup'] = {}
- if module.params.get('recording_group').get('all_supported') is not None:
- params['recordingGroup'].update({
- 'allSupported': module.params.get('recording_group').get('all_supported')
- })
- if module.params.get('recording_group').get('include_global_types') is not None:
- params['recordingGroup'].update({
- 'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types')
- })
- if module.params.get('recording_group').get('resource_types'):
- params['recordingGroup'].update({
- 'resourceTypes': module.params.get('recording_group').get('resource_types')
- })
+ params["name"] = name
+ if module.params.get("role_arn"):
+ params["roleARN"] = module.params.get("role_arn")
+ if module.params.get("recording_group"):
+ params["recordingGroup"] = {}
+ if module.params.get("recording_group").get("all_supported") is not None:
+ params["recordingGroup"].update({"allSupported": module.params.get("recording_group").get("all_supported")})
+ if module.params.get("recording_group").get("include_global_types") is not None:
+ params["recordingGroup"].update(
+ {"includeGlobalResourceTypes": module.params.get("recording_group").get("include_global_types")}
+ )
+ if module.params.get("recording_group").get("resource_types"):
+ params["recordingGroup"].update(
+ {"resourceTypes": module.params.get("recording_group").get("resource_types")}
+ )
else:
- params['recordingGroup'].update({
- 'resourceTypes': []
- })
+ params["recordingGroup"].update({"resourceTypes": []})
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("config", retry_decorator=AWSRetry.jittered_backoff())
resource_status = resource_exists(client, module, params)
- if state == 'present':
+ if state == "present":
if not resource_status:
create_resource(client, module, params, result)
if resource_status:
update_resource(client, module, params, result)
- if state == 'absent':
+ if state == "absent":
if resource_status:
delete_resource(client, module, params, result)
- module.exit_json(changed=result['changed'])
+ module.exit_json(changed=result["changed"])
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
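
The argument spec above uses required_if to demand role_arn and recording_group whenever state=present. A plain-Python sketch of the check that rule performs (the function name and error text are assumptions; AnsibleAWSModule carries out this validation itself):

def check_required_if(params):
    # Mirrors required_if=[("state", "present", ["role_arn", "recording_group"])].
    if params.get("state") == "present":
        missing = [key for key in ("role_arn", "recording_group") if not params.get(key)]
        if missing:
            raise ValueError(f"state is present but the following are missing: {', '.join(missing)}")

check_required_if({
    "state": "present",
    "role_arn": "arn:aws:iam::123456789012:role/AwsConfigRecorder",
    "recording_group": {"all_supported": True},
})
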
diff --git a/ansible_collections/community/aws/plugins/modules/config_rule.py b/ansible_collections/community/aws/plugins/modules/config_rule.py
index d5cb717fd..b86a528dd 100644
--- a/ansible_collections/community/aws/plugins/modules/config_rule.py
+++ b/ansible_collections/community/aws/plugins/modules/config_rule.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: config_rule
version_added: 1.0.0
@@ -86,27 +83,26 @@ options:
choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create Config Rule for AWS Config
community.aws.config_rule:
name: test_config_rule
state: present
description: 'This AWS Config rule checks for public write access on S3 buckets'
scope:
- compliance_types:
- - 'AWS::S3::Bucket'
+ compliance_types:
+ - 'AWS::S3::Bucket'
source:
- owner: AWS
- identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
-
-'''
+ owner: AWS
+ identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
+"""
-RETURN = '''#'''
+RETURN = r"""#"""
try:
@@ -116,30 +112,32 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def rule_exists(client, module, params):
try:
rule = client.describe_config_rules(
- ConfigRuleNames=[params['ConfigRuleName']],
+ ConfigRuleNames=[params["ConfigRuleName"]],
aws_retry=True,
)
- return rule['ConfigRules'][0]
- except is_boto3_error_code('NoSuchConfigRuleException'):
+ return rule["ConfigRules"][0]
+ except is_boto3_error_code("NoSuchConfigRuleException"):
return
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
def create_resource(client, module, params, result):
try:
- client.put_config_rule(
- ConfigRule=params
- )
- result['changed'] = True
+ client.put_config_rule(ConfigRule=params)
+ result["changed"] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config rule")
@@ -147,21 +145,19 @@ def create_resource(client, module, params, result):
def update_resource(client, module, params, result):
current_params = client.describe_config_rules(
- ConfigRuleNames=[params['ConfigRuleName']],
+ ConfigRuleNames=[params["ConfigRuleName"]],
aws_retry=True,
)
- del current_params['ConfigRules'][0]['ConfigRuleArn']
- del current_params['ConfigRules'][0]['ConfigRuleId']
- del current_params['ConfigRules'][0]['EvaluationModes']
+ del current_params["ConfigRules"][0]["ConfigRuleArn"]
+ del current_params["ConfigRules"][0]["ConfigRuleId"]
+ del current_params["ConfigRules"][0]["EvaluationModes"]
- if params != current_params['ConfigRules'][0]:
+ if params != current_params["ConfigRules"][0]:
try:
- client.put_config_rule(
- ConfigRule=params
- )
- result['changed'] = True
- result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params))
+ client.put_config_rule(ConfigRule=params)
+ result["changed"] = True
+ result["rule"] = camel_dict_to_snake_dict(rule_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config rule")
@@ -170,11 +166,11 @@ def update_resource(client, module, params, result):
def delete_resource(client, module, params, result):
try:
response = client.delete_config_rule(
- ConfigRuleName=params['ConfigRuleName'],
+ ConfigRuleName=params["ConfigRuleName"],
aws_retry=True,
)
- result['changed'] = True
- result['rule'] = {}
+ result["changed"] = True
+ result["rule"] = {}
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Config rule")
@@ -183,93 +179,105 @@ def delete_resource(client, module, params, result):
def main():
module = AnsibleAWSModule(
argument_spec={
- 'name': dict(type='str', required=True),
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 'description': dict(type='str'),
- 'scope': dict(type='dict'),
- 'source': dict(type='dict', required=True),
- 'input_parameters': dict(type='str'),
- 'execution_frequency': dict(
- type='str',
+ "name": dict(type="str", required=True),
+ "state": dict(type="str", choices=["present", "absent"], default="present"),
+ "description": dict(type="str"),
+ "scope": dict(type="dict"),
+ "source": dict(type="dict", required=True),
+ "input_parameters": dict(type="str"),
+ "execution_frequency": dict(
+ type="str",
choices=[
- 'One_Hour',
- 'Three_Hours',
- 'Six_Hours',
- 'Twelve_Hours',
- 'TwentyFour_Hours'
- ]
+ "One_Hour",
+ "Three_Hours",
+ "Six_Hours",
+ "Twelve_Hours",
+ "TwentyFour_Hours",
+ ],
),
},
supports_check_mode=False,
)
- result = {
- 'changed': False
- }
+ result = {"changed": False}
- name = module.params.get('name')
- resource_type = module.params.get('resource_type')
- state = module.params.get('state')
+ name = module.params.get("name")
+ resource_type = module.params.get("resource_type")
+ state = module.params.get("state")
params = {}
if name:
- params['ConfigRuleName'] = name
- if module.params.get('description'):
- params['Description'] = module.params.get('description')
- if module.params.get('scope'):
- params['Scope'] = {}
- if module.params.get('scope').get('compliance_types'):
- params['Scope'].update({
- 'ComplianceResourceTypes': module.params.get('scope').get('compliance_types')
- })
- if module.params.get('scope').get('tag_key'):
- params['Scope'].update({
- 'TagKey': module.params.get('scope').get('tag_key')
- })
- if module.params.get('scope').get('tag_value'):
- params['Scope'].update({
- 'TagValue': module.params.get('scope').get('tag_value')
- })
- if module.params.get('scope').get('compliance_id'):
- params['Scope'].update({
- 'ComplianceResourceId': module.params.get('scope').get('compliance_id')
- })
- if module.params.get('source'):
- params['Source'] = {}
- if module.params.get('source').get('owner'):
- params['Source'].update({
- 'Owner': module.params.get('source').get('owner')
- })
- if module.params.get('source').get('identifier'):
- params['Source'].update({
- 'SourceIdentifier': module.params.get('source').get('identifier')
- })
- if module.params.get('source').get('details'):
- params['Source'].update({
- 'SourceDetails': module.params.get('source').get('details')
- })
- if module.params.get('input_parameters'):
- params['InputParameters'] = module.params.get('input_parameters')
- if module.params.get('execution_frequency'):
- params['MaximumExecutionFrequency'] = module.params.get('execution_frequency')
- params['ConfigRuleState'] = 'ACTIVE'
+ params["ConfigRuleName"] = name
+ if module.params.get("description"):
+ params["Description"] = module.params.get("description")
+ if module.params.get("scope"):
+ params["Scope"] = {}
+ if module.params.get("scope").get("compliance_types"):
+ params["Scope"].update(
+ {
+ "ComplianceResourceTypes": module.params.get("scope").get("compliance_types"),
+ }
+ )
+ if module.params.get("scope").get("tag_key"):
+ params["Scope"].update(
+ {
+ "TagKey": module.params.get("scope").get("tag_key"),
+ }
+ )
+ if module.params.get("scope").get("tag_value"):
+ params["Scope"].update(
+ {
+ "TagValue": module.params.get("scope").get("tag_value"),
+ }
+ )
+ if module.params.get("scope").get("compliance_id"):
+ params["Scope"].update(
+ {
+ "ComplianceResourceId": module.params.get("scope").get("compliance_id"),
+ }
+ )
+ if module.params.get("source"):
+ params["Source"] = {}
+ if module.params.get("source").get("owner"):
+ params["Source"].update(
+ {
+ "Owner": module.params.get("source").get("owner"),
+ }
+ )
+ if module.params.get("source").get("identifier"):
+ params["Source"].update(
+ {
+ "SourceIdentifier": module.params.get("source").get("identifier"),
+ }
+ )
+ if module.params.get("source").get("details"):
+ params["Source"].update(
+ {
+ "SourceDetails": module.params.get("source").get("details"),
+ }
+ )
+ if module.params.get("input_parameters"):
+ params["InputParameters"] = module.params.get("input_parameters")
+ if module.params.get("execution_frequency"):
+ params["MaximumExecutionFrequency"] = module.params.get("execution_frequency")
+ params["ConfigRuleState"] = "ACTIVE"
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("config", retry_decorator=AWSRetry.jittered_backoff())
existing_rule = rule_exists(client, module, params)
- if state == 'present':
+ if state == "present":
if not existing_rule:
create_resource(client, module, params, result)
else:
update_resource(client, module, params, result)
- if state == 'absent':
+ if state == "absent":
if existing_rule:
delete_resource(client, module, params, result)
module.exit_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
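
The Scope and Source handling in main() above is a straight layout and quote-style refactor; the option-to-API mapping it implements stays the same. As a reading aid only, here is a minimal sketch of that mapping with a hypothetical helper (build_scope is not part of the module):

    # Hypothetical condensation of the Scope construction in main(); the module
    # keeps the explicit if-blocks, this only illustrates the mapping.
    SCOPE_KEYS = {
        "compliance_types": "ComplianceResourceTypes",
        "tag_key": "TagKey",
        "tag_value": "TagValue",
        "compliance_id": "ComplianceResourceId",
    }

    def build_scope(scope_options):
        """Translate the module's scope suboptions into the AWS Config Scope shape."""
        return {api_key: scope_options[opt] for opt, api_key in SCOPE_KEYS.items() if scope_options.get(opt)}

    # build_scope({"tag_key": "env", "tag_value": "prod"})
    # -> {"TagKey": "env", "TagValue": "prod"}
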
diff --git a/ansible_collections/community/aws/plugins/modules/data_pipeline.py b/ansible_collections/community/aws/plugins/modules/data_pipeline.py
index fc441c10c..85849324f 100644
--- a/ansible_collections/community/aws/plugins/modules/data_pipeline.py
+++ b/ansible_collections/community/aws/plugins/modules/data_pipeline.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: data_pipeline
version_added: 1.0.0
@@ -15,10 +12,6 @@ author:
- Raghu Udiyar (@raags) <raghusiddarth@gmail.com>
- Sloane Hertel (@s-hertel) <shertel@redhat.com>
short_description: Create and manage AWS Datapipelines
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
description:
- Create and manage AWS Datapipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects)
given to the datapipeline.
@@ -126,9 +119,13 @@ options:
type: dict
default: {}
aliases: ['resource_tags']
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create pipeline
@@ -147,23 +144,30 @@ EXAMPLES = r'''
- community.aws.data_pipeline:
name: test-dp
objects:
- - "id": "DefaultSchedule"
- "name": "Every 1 day"
- "fields":
+ - id: "DefaultSchedule"
+ name: "Every 1 day"
+ fields:
- "key": "period"
"stringValue": "1 days"
- "key": "type"
"stringValue": "Schedule"
- "key": "startAt"
"stringValue": "FIRST_ACTIVATION_DATE_TIME"
- - "id": "Default"
- "name": "Default"
- "fields": [ { "key": "resourceRole", "stringValue": "my_resource_role" },
- { "key": "role", "stringValue": "DataPipelineDefaultRole" },
- { "key": "pipelineLogUri", "stringValue": "s3://my_s3_log.txt" },
- { "key": "scheduleType", "stringValue": "cron" },
- { "key": "schedule", "refValue": "DefaultSchedule" },
- { "key": "failureAndRerunMode", "stringValue": "CASCADE" } ]
+ - id: "Default"
+ name: "Default"
+ fields:
+ - "key": "resourceRole"
+ "stringValue": "my_resource_role"
+ - "key": "role"
+ "stringValue": "DataPipelineDefaultRole"
+ - "key": "pipelineLogUri"
+ "stringValue": "s3://my_s3_log.txt"
+ - "key": "scheduleType"
+ "stringValue": "cron"
+ - "key": "schedule"
+ "refValue": "DefaultSchedule"
+ - "key": "failureAndRerunMode"
+ "stringValue": "CASCADE"
state: active
# Activate pipeline
@@ -177,10 +181,9 @@ EXAMPLES = r'''
name: test-dp
region: us-west-2
state: absent
+"""
-'''
-
-RETURN = r'''
+RETURN = r"""
changed:
description: whether the data pipeline has been modified
type: bool
@@ -195,7 +198,7 @@ result:
data_pipeline will be an empty dict. The msg describes the status of the operation.
returned: always
type: dict
-'''
+"""
import hashlib
import json
@@ -209,15 +212,15 @@ except ImportError:
from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED']
-DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING']
-DP_ACTIVATING_STATE = 'ACTIVATING'
-DP_DEACTIVATING_STATE = 'DEACTIVATING'
-PIPELINE_DOESNT_EXIST = '^.*Pipeline with id: {0} does not exist$'
+DP_ACTIVE_STATES = ["ACTIVE", "SCHEDULED"]
+DP_INACTIVE_STATES = ["INACTIVE", "PENDING", "FINISHED", "DELETING"]
+DP_ACTIVATING_STATE = "ACTIVATING"
+DP_DEACTIVATING_STATE = "DEACTIVATING"
+PIPELINE_DOESNT_EXIST = "^.*Pipeline with id: {0} does not exist$"
class DataPipelineNotFound(Exception):
@@ -238,9 +241,9 @@ def pipeline_id(client, name):
"""
pipelines = client.list_pipelines()
- for dp in pipelines['pipelineIdList']:
- if dp['name'] == name:
- return dp['id']
+ for dp in pipelines["pipelineIdList"]:
+ if dp["name"] == name:
+ return dp["id"]
raise DataPipelineNotFound
@@ -254,7 +257,7 @@ def pipeline_description(client, dp_id):
"""
try:
return client.describe_pipelines(pipelineIds=[dp_id])
- except is_boto3_error_code(['PipelineNotFoundException', 'PipelineDeletedException']):
+ except is_boto3_error_code(["PipelineNotFoundException", "PipelineDeletedException"]):
raise DataPipelineNotFound
@@ -270,10 +273,10 @@ def pipeline_field(client, dp_id, field):
"""
dp_description = pipeline_description(client, dp_id)
- for field_key in dp_description['pipelineDescriptionList'][0]['fields']:
- if field_key['key'] == field:
- return field_key['stringValue']
- raise KeyError("Field key {0} not found!".format(field))
+ for field_key in dp_description["pipelineDescriptionList"][0]["fields"]:
+ if field_key["key"] == field:
+ return field_key["stringValue"]
+ raise KeyError(f"Field key {field} not found!")
def run_with_timeout(timeout, func, *func_args, **func_kwargs):
@@ -345,70 +348,70 @@ def pipeline_exists_timeout(client, dp_id, timeout):
def activate_pipeline(client, module):
- """Activates pipeline
-
- """
- dp_name = module.params.get('name')
- timeout = module.params.get('timeout')
+ """Activates pipeline"""
+ dp_name = module.params.get("name")
+ timeout = module.params.get("timeout")
try:
dp_id = pipeline_id(client, dp_name)
except DataPipelineNotFound:
- module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
+ module.fail_json(msg=f"Data Pipeline {dp_name} not found")
if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES:
changed = False
else:
try:
client.activate_pipeline(pipelineId=dp_id)
- except is_boto3_error_code('InvalidRequestException'):
+ except is_boto3_error_code("InvalidRequestException"):
module.fail_json(msg="You need to populate your pipeline before activation.")
try:
- pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES,
- timeout=timeout)
+ pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES, timeout=timeout)
except TimeOutException:
if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
# activated but completed more rapidly than it was checked
pass
else:
- module.fail_json(msg=('Data Pipeline {0} failed to activate '
- 'within timeout {1} seconds').format(dp_name, timeout))
+ module.fail_json(
+ msg=f"Data Pipeline {dp_name} failed to activate within timeout {timeout} seconds",
+ )
changed = True
data_pipeline = get_result(client, dp_id)
- result = {'data_pipeline': data_pipeline,
- 'msg': 'Data Pipeline {0} activated.'.format(dp_name)}
+ result = {
+ "data_pipeline": data_pipeline,
+ "msg": f"Data Pipeline {dp_name} activated.",
+ }
return (changed, result)
def deactivate_pipeline(client, module):
- """Deactivates pipeline
-
- """
- dp_name = module.params.get('name')
- timeout = module.params.get('timeout')
+ """Deactivates pipeline"""
+ dp_name = module.params.get("name")
+ timeout = module.params.get("timeout")
try:
dp_id = pipeline_id(client, dp_name)
except DataPipelineNotFound:
- module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
+ module.fail_json(msg=f"Data Pipeline {dp_name} not found")
if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES:
changed = False
else:
client.deactivate_pipeline(pipelineId=dp_id)
try:
- pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES,
- timeout=timeout)
+ pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES, timeout=timeout)
except TimeOutException:
- module.fail_json(msg=('Data Pipeline {0} failed to deactivate'
- 'within timeout {1} seconds').format(dp_name, timeout))
+ module.fail_json(
+ msg=f"Data Pipeline {dp_name} failed to deactivate within timeout {timeout} seconds",
+ )
changed = True
data_pipeline = get_result(client, dp_id)
- result = {'data_pipeline': data_pipeline,
- 'msg': 'Data Pipeline {0} deactivated.'.format(dp_name)}
+ result = {
+ "data_pipeline": data_pipeline,
+ "msg": f"Data Pipeline {dp_name} deactivated.",
+ }
return (changed, result)
@@ -422,11 +425,9 @@ def _delete_dp_with_check(dp_id, client, timeout):
def delete_pipeline(client, module):
- """Deletes pipeline
-
- """
- dp_name = module.params.get('name')
- timeout = module.params.get('timeout')
+ """Deletes pipeline"""
+ dp_name = module.params.get("name")
+ timeout = module.params.get("timeout")
try:
dp_id = pipeline_id(client, dp_name)
@@ -435,10 +436,13 @@ def delete_pipeline(client, module):
except DataPipelineNotFound:
changed = False
except TimeOutException:
- module.fail_json(msg=('Data Pipeline {0} failed to delete'
- 'within timeout {1} seconds').format(dp_name, timeout))
- result = {'data_pipeline': {},
- 'msg': 'Data Pipeline {0} deleted'.format(dp_name)}
+ module.fail_json(
+ msg=f"Data Pipeline {dp_name} failed to delete within timeout {timeout} seconds",
+ )
+ result = {
+ "data_pipeline": {},
+ "msg": f"Data Pipeline {dp_name} deleted",
+ }
return (changed, result)
@@ -446,14 +450,14 @@ def delete_pipeline(client, module):
def build_unique_id(module):
data = dict(module.params)
# removing objects from the unique id so we can update objects or populate the pipeline after creation without needing to make a new pipeline
- [data.pop(each, None) for each in ('objects', 'timeout')]
+ [data.pop(each, None) for each in ("objects", "timeout")]
json_data = json.dumps(data, sort_keys=True).encode("utf-8")
hashed_data = hashlib.md5(json_data).hexdigest()
return hashed_data
def format_tags(tags):
- """ Reformats tags
+ """Reformats tags
:param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3})
:returns: list of dicts (e.g. [{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}])
@@ -463,16 +467,16 @@ def format_tags(tags):
def get_result(client, dp_id):
- """ Get the current state of the data pipeline and reformat it to snake_case for exit_json
+ """Get the current state of the data pipeline and reformat it to snake_case for exit_json
:param object client: boto3 datapipeline client
:param string dp_id: pipeline id
:returns: reformatted dict of pipeline description
- """
+ """
# pipeline_description returns a pipelineDescriptionList of length 1
# dp is a dict with keys "description" (str), "fields" (list), "name" (str), "pipelineId" (str), "tags" (dict)
- dp = pipeline_description(client, dp_id)['pipelineDescriptionList'][0]
+ dp = pipeline_description(client, dp_id)["pipelineDescriptionList"][0]
# Get uniqueId and pipelineState in fields to add to the exit_json result
dp["unique_id"] = pipeline_field(client, dp_id, field="uniqueId")
@@ -489,8 +493,7 @@ def get_result(client, dp_id):
def diff_pipeline(client, module, objects, unique_id, dp_name):
- """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated
- """
+ """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated"""
result = {}
changed = False
create_dp = False
@@ -506,16 +509,18 @@ def diff_pipeline(client, module, objects, unique_id, dp_name):
create_dp = True
# Unique ids are the same - check if pipeline needs modification
else:
- dp_objects = client.get_pipeline_definition(pipelineId=dp_id)['pipelineObjects']
+ dp_objects = client.get_pipeline_definition(pipelineId=dp_id)["pipelineObjects"]
# Definition needs to be updated
if dp_objects != objects:
changed, msg = define_pipeline(client, module, objects, dp_id)
# No changes
else:
- msg = 'Data Pipeline {0} is present'.format(dp_name)
+ msg = f"Data Pipeline {dp_name} is present"
data_pipeline = get_result(client, dp_id)
- result = {'data_pipeline': data_pipeline,
- 'msg': msg}
+ result = {
+ "data_pipeline": data_pipeline,
+ "msg": msg,
+ }
except DataPipelineNotFound:
create_dp = True
@@ -523,30 +528,32 @@ def diff_pipeline(client, module, objects, unique_id, dp_name):
def define_pipeline(client, module, objects, dp_id):
- """Puts pipeline definition
-
- """
- dp_name = module.params.get('name')
+ """Puts pipeline definition"""
+ dp_name = module.params.get("name")
if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
- msg = 'Data Pipeline {0} is unable to be updated while in state FINISHED.'.format(dp_name)
+ msg = f"Data Pipeline {dp_name} is unable to be updated while in state FINISHED."
changed = False
elif objects:
- parameters = module.params.get('parameters')
- values = module.params.get('values')
+ parameters = module.params.get("parameters")
+ values = module.params.get("values")
try:
- client.put_pipeline_definition(pipelineId=dp_id,
- pipelineObjects=objects,
- parameterObjects=parameters,
- parameterValues=values)
- msg = 'Data Pipeline {0} has been updated.'.format(dp_name)
+ client.put_pipeline_definition(
+ pipelineId=dp_id, pipelineObjects=objects, parameterObjects=parameters, parameterValues=values
+ )
+ msg = f"Data Pipeline {dp_name} has been updated."
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to put the definition for pipeline {0}. Check that string/reference fields"
- "are not empty and that the number of objects in the pipeline does not exceed maximum allowed"
- "objects".format(dp_name))
+ module.fail_json_aws(
+ e,
+ msg=(
+                    f"Failed to put the definition for pipeline {dp_name}. Check that string/reference fields "
+                    "are not empty and that the number of objects in the pipeline does not exceed maximum allowed "
+                    "objects"
+ ),
+ )
else:
changed = False
msg = ""
@@ -555,14 +562,12 @@ def define_pipeline(client, module, objects, dp_id):
def create_pipeline(client, module):
- """Creates datapipeline. Uses uniqueId to achieve idempotency.
-
- """
- dp_name = module.params.get('name')
- objects = module.params.get('objects', None)
- description = module.params.get('description', '')
- tags = module.params.get('tags')
- timeout = module.params.get('timeout')
+ """Creates datapipeline. Uses uniqueId to achieve idempotency."""
+ dp_name = module.params.get("name")
+ objects = module.params.get("objects", None)
+ description = module.params.get("description", "")
+ tags = module.params.get("tags")
+ timeout = module.params.get("timeout")
unique_id = build_unique_id(module)
create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name)
@@ -576,24 +581,27 @@ def create_pipeline(client, module):
# Make pipeline
try:
tags = format_tags(tags)
- dp = client.create_pipeline(name=dp_name,
- uniqueId=unique_id,
- description=description,
- tags=tags)
- dp_id = dp['pipelineId']
+ dp = client.create_pipeline(name=dp_name, uniqueId=unique_id, description=description, tags=tags)
+ dp_id = dp["pipelineId"]
pipeline_exists_timeout(client, dp_id, timeout)
except TimeOutException:
- module.fail_json(msg=('Data Pipeline {0} failed to create'
- 'within timeout {1} seconds').format(dp_name, timeout))
+ module.fail_json(
+ msg=f"Data Pipeline {dp_name} failed to create within timeout {timeout} seconds",
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to create the data pipeline {0}.".format(dp_name))
+ module.fail_json_aws(
+ e,
+ msg=f"Failed to create the data pipeline {dp_name}.",
+ )
# Put pipeline definition
changed, msg = define_pipeline(client, module, objects, dp_id)
changed = True
data_pipeline = get_result(client, dp_id)
- result = {'data_pipeline': data_pipeline,
- 'msg': 'Data Pipeline {0} created.'.format(dp_name) + msg}
+ result = {
+ "data_pipeline": data_pipeline,
+ "msg": f"Data Pipeline {dp_name} created." + msg,
+ }
return (changed, result)
@@ -601,34 +609,33 @@ def create_pipeline(client, module):
def main():
argument_spec = dict(
name=dict(required=True),
- description=dict(required=False, default=''),
- objects=dict(required=False, type='list', default=[], elements='dict'),
- parameters=dict(required=False, type='list', default=[], elements='dict'),
- timeout=dict(required=False, type='int', default=300),
- state=dict(default='present', choices=['present', 'absent',
- 'active', 'inactive']),
- tags=dict(required=False, type='dict', default={}, aliases=['resource_tags']),
- values=dict(required=False, type='list', default=[], elements='dict'),
+ description=dict(required=False, default=""),
+ objects=dict(required=False, type="list", default=[], elements="dict"),
+ parameters=dict(required=False, type="list", default=[], elements="dict"),
+ timeout=dict(required=False, type="int", default=300),
+ state=dict(default="present", choices=["present", "absent", "active", "inactive"]),
+ tags=dict(required=False, type="dict", default={}, aliases=["resource_tags"]),
+ values=dict(required=False, type="list", default=[], elements="dict"),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
try:
- client = module.client('datapipeline')
+ client = module.client("datapipeline")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- state = module.params.get('state')
- if state == 'present':
+ state = module.params.get("state")
+ if state == "present":
changed, result = create_pipeline(client, module)
- elif state == 'absent':
+ elif state == "absent":
changed, result = delete_pipeline(client, module)
- elif state == 'active':
+ elif state == "active":
changed, result = activate_pipeline(client, module)
- elif state == 'inactive':
+ elif state == "inactive":
changed, result = deactivate_pipeline(client, module)
module.exit_json(result=result, changed=changed)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
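
As the documentation above notes, pipeline creation is not idempotent in AWS; build_unique_id() therefore hashes the module parameters (minus objects and timeout) into the uniqueId passed to create_pipeline. A self-contained sketch of that hashing, assuming a plain parameter dict:

    import hashlib
    import json

    def unique_id_for(params):
        # Mirror build_unique_id(): drop keys that may change without
        # requiring a new pipeline, then md5 the sorted JSON.
        data = {k: v for k, v in params.items() if k not in ("objects", "timeout")}
        json_data = json.dumps(data, sort_keys=True).encode("utf-8")
        return hashlib.md5(json_data).hexdigest()

    # Identical name/description/tags hash identically, so diff_pipeline()
    # can locate the existing pipeline even though AWS would happily create
    # a duplicate with the same name.
    assert unique_id_for({"name": "test-dp", "timeout": 300}) == unique_id_for({"name": "test-dp", "timeout": 600})
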
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py b/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py
index 45180ac6c..1e99fd5ea 100644
--- a/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_confirm_connection.py
@@ -1,15 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: directconnect_confirm_connection
short_description: Confirms the creation of a hosted DirectConnect connection
@@ -21,10 +16,6 @@ description:
The usage did not change.
author:
- "Matt Traynham (@mtraynham)"
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
options:
name:
description:
@@ -36,9 +27,13 @@ options:
- The ID of the Direct Connect connection.
- One of I(connection_id) or I(name) must be specified.
type: str
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# confirm a Direct Connect by name
- name: confirm the connection id
@@ -49,29 +44,31 @@ EXAMPLES = '''
- name: confirm the connection id
community.aws.directconnect_confirm_connection:
connection_id: dxcon-xxxxxxxx
-'''
+"""
-RETURN = '''
+RETURN = r"""
connection_state:
description: The state of the connection.
returned: always
type: str
sample: pending
-'''
+"""
import traceback
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # handled by imported AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]}
@@ -84,26 +81,28 @@ def describe_connections(client, params):
def find_connection_id(client, connection_id=None, connection_name=None):
params = {}
if connection_id:
- params['connectionId'] = connection_id
+ params["connectionId"] = connection_id
try:
response = describe_connections(client, params)
except (BotoCoreError, ClientError) as e:
if connection_id:
- msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
+ msg = f"Failed to describe DirectConnect ID {connection_id}"
else:
msg = "Failed to describe DirectConnect connections"
- raise DirectConnectError(msg=msg,
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(
+ msg=msg,
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
match = []
- if len(response.get('connections', [])) == 1 and connection_id:
- if response['connections'][0]['connectionState'] != 'deleted':
- match.append(response['connections'][0]['connectionId'])
+ if len(response.get("connections", [])) == 1 and connection_id:
+ if response["connections"][0]["connectionState"] != "deleted":
+ match.append(response["connections"][0]["connectionId"])
- for conn in response.get('connections', []):
- if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
- match.append(conn['connectionId'])
+ for conn in response.get("connections", []):
+ if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted":
+ match.append(conn["connectionId"])
if len(match) == 1:
return match[0]
@@ -114,34 +113,33 @@ def find_connection_id(client, connection_id=None, connection_name=None):
def get_connection_state(client, connection_id):
try:
response = describe_connections(client, dict(connectionId=connection_id))
- return response['connections'][0]['connectionState']
+ return response["connections"][0]["connectionState"]
except (BotoCoreError, ClientError, IndexError) as e:
- raise DirectConnectError(msg="Failed to describe DirectConnect connection {0} state".format(connection_id),
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(
+ msg=f"Failed to describe DirectConnect connection {connection_id} state",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
def main():
- argument_spec = dict(
- connection_id=dict(),
- name=dict()
+ argument_spec = dict(connection_id=dict(), name=dict())
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[["connection_id", "name"]],
+ required_one_of=[["connection_id", "name"]],
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- mutually_exclusive=[['connection_id', 'name']],
- required_one_of=[['connection_id', 'name']])
- client = module.client('directconnect')
+ client = module.client("directconnect")
- connection_id = module.params['connection_id']
- connection_name = module.params['name']
+ connection_id = module.params["connection_id"]
+ connection_name = module.params["name"]
changed = False
connection_state = None
try:
- connection_id = find_connection_id(client,
- connection_id,
- connection_name)
+ connection_id = find_connection_id(client, connection_id, connection_name)
connection_state = get_connection_state(client, connection_id)
- if connection_state == 'ordering':
+ if connection_state == "ordering":
client.confirm_connection(connectionId=connection_id)
changed = True
connection_state = get_connection_state(client, connection_id)
@@ -154,5 +152,5 @@ def main():
module.exit_json(changed=changed, connection_state=connection_state)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
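
The retry_params dict above is consumed by AWSRetry.jittered_backoff, which returns a decorator that is applied per call rather than per function definition. A minimal sketch of the pattern, assuming a boto3 directconnect client:

    from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

    retry_params = {"retries": 10, "delay": 5, "backoff": 1.2,
                    "catch_extra_error_codes": ["DirectConnectClientException"]}

    def describe_connections(client, params):
        # Throttling errors and DirectConnectClientException responses are
        # retried with jittered exponential backoff before being raised.
        return AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params)
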
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_connection.py b/ansible_collections/community/aws/plugins/modules/directconnect_connection.py
index 28d86717d..40e9bc913 100644
--- a/ansible_collections/community/aws/plugins/modules/directconnect_connection.py
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_connection.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: directconnect_connection
version_added: 1.0.0
@@ -19,10 +17,6 @@ description:
The usage did not change.
author:
- "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
options:
state:
description:
@@ -68,9 +62,13 @@ options:
    - By default this will not happen. This option must be explicitly set to C(true) to change I(bandwidth) or I(location).
type: bool
default: false
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = """
+EXAMPLES = r"""
# create a Direct Connect connection
- community.aws.directconnect_connection:
@@ -102,7 +100,7 @@ EXAMPLES = """
name: ansible-test-connection
"""
-RETURN = """
+RETURN = r"""
connection:
description: The attributes of the direct connect connection.
type: complex
@@ -158,18 +156,20 @@ connection:
import traceback
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # handled by imported AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import associate_connection_and_lag
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]}
@@ -181,31 +181,29 @@ def connection_status(client, connection_id):
def connection_exists(client, connection_id=None, connection_name=None, verify=True):
params = {}
if connection_id:
- params['connectionId'] = connection_id
+ params["connectionId"] = connection_id
try:
response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params)
except (BotoCoreError, ClientError) as e:
if connection_id:
- msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
+ msg = f"Failed to describe DirectConnect ID {connection_id}"
else:
msg = "Failed to describe DirectConnect connections"
- raise DirectConnectError(msg=msg,
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e)
match = []
connection = []
# look for matching connections
- if len(response.get('connections', [])) == 1 and connection_id:
- if response['connections'][0]['connectionState'] != 'deleted':
- match.append(response['connections'][0]['connectionId'])
- connection.extend(response['connections'])
+ if len(response.get("connections", [])) == 1 and connection_id:
+ if response["connections"][0]["connectionState"] != "deleted":
+ match.append(response["connections"][0]["connectionId"])
+ connection.extend(response["connections"])
- for conn in response.get('connections', []):
- if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
- match.append(conn['connectionId'])
+ for conn in response.get("connections", []):
+ if connection_name == conn["connectionName"] and conn["connectionState"] != "deleted":
+ match.append(conn["connectionId"])
connection.append(conn)
    # verifying if the connection exists; if true, return connection identifier, otherwise return False
@@ -215,33 +213,35 @@ def connection_exists(client, connection_id=None, connection_name=None, verify=T
return False
# not verifying if the connection exists; just return current connection info
elif len(connection) == 1:
- return {'connection': connection[0]}
- return {'connection': {}}
+ return {"connection": connection[0]}
+ return {"connection": {}}
def create_connection(client, location, bandwidth, name, lag_id):
if not name:
raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.")
params = {
- 'location': location,
- 'bandwidth': bandwidth,
- 'connectionName': name,
+ "location": location,
+ "bandwidth": bandwidth,
+ "connectionName": name,
}
if lag_id:
- params['lagId'] = lag_id
+ params["lagId"] = lag_id
try:
connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params)
except (BotoCoreError, ClientError) as e:
- raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name),
- last_traceback=traceback.format_exc(),
- exception=e)
- return connection['connectionId']
+ raise DirectConnectError(
+ msg=f"Failed to create DirectConnect connection {name}",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
+ return connection["connectionId"]
def changed_properties(current_status, location, bandwidth):
- current_bandwidth = current_status['bandwidth']
- current_location = current_status['location']
+ current_bandwidth = current_status["bandwidth"]
+ current_location = current_status["location"]
return current_bandwidth != bandwidth or current_location != location
@@ -249,10 +249,10 @@ def changed_properties(current_status, location, bandwidth):
@AWSRetry.jittered_backoff(**retry_params)
def update_associations(client, latest_state, connection_id, lag_id):
changed = False
- if 'lagId' in latest_state and lag_id != latest_state['lagId']:
- disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId'])
+ if "lagId" in latest_state and lag_id != latest_state["lagId"]:
+ disassociate_connection_and_lag(client, connection_id, lag_id=latest_state["lagId"])
changed = True
- if (changed and lag_id) or (lag_id and 'lagId' not in latest_state):
+ if (changed and lag_id) or (lag_id and "lagId" not in latest_state):
associate_connection_and_lag(client, connection_id, lag_id)
changed = True
return changed
@@ -261,16 +261,18 @@ def update_associations(client, latest_state, connection_id, lag_id):
def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update):
# the connection is found; get the latest state and see if it needs to be updated
if connection_id:
- latest_state = connection_status(client, connection_id=connection_id)['connection']
+ latest_state = connection_status(client, connection_id=connection_id)["connection"]
if changed_properties(latest_state, location, bandwidth) and forced_update:
ensure_absent(client, connection_id)
- return ensure_present(client=client,
- connection_id=None,
- connection_name=connection_name,
- location=location,
- bandwidth=bandwidth,
- lag_id=lag_id,
- forced_update=forced_update)
+ return ensure_present(
+ client=client,
+ connection_id=None,
+ connection_name=connection_name,
+ location=location,
+ bandwidth=bandwidth,
+ lag_id=lag_id,
+ forced_update=forced_update,
+ )
elif update_associations(client, latest_state, connection_id, lag_id):
return True, connection_id
@@ -293,53 +295,59 @@ def ensure_absent(client, connection_id):
def main():
argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent']),
+ state=dict(required=True, choices=["present", "absent"]),
name=dict(),
location=dict(),
- bandwidth=dict(choices=['1Gbps', '10Gbps']),
+ bandwidth=dict(choices=["1Gbps", "10Gbps"]),
link_aggregation_group=dict(),
connection_id=dict(),
- forced_update=dict(type='bool', default=False)
+ forced_update=dict(type="bool", default=False),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- required_one_of=[('connection_id', 'name')],
- required_if=[('state', 'present', ('location', 'bandwidth'))]
+ required_one_of=[("connection_id", "name")],
+ required_if=[("state", "present", ("location", "bandwidth"))],
)
- connection = module.client('directconnect')
+ connection = module.client("directconnect")
- state = module.params.get('state')
+ state = module.params.get("state")
try:
connection_id = connection_exists(
- connection,
- connection_id=module.params.get('connection_id'),
- connection_name=module.params.get('name')
+ connection, connection_id=module.params.get("connection_id"), connection_name=module.params.get("name")
)
- if not connection_id and module.params.get('connection_id'):
- module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id')))
-
- if state == 'present':
- changed, connection_id = ensure_present(connection,
- connection_id=connection_id,
- connection_name=module.params.get('name'),
- location=module.params.get('location'),
- bandwidth=module.params.get('bandwidth'),
- lag_id=module.params.get('link_aggregation_group'),
- forced_update=module.params.get('forced_update'))
+ if not connection_id and module.params.get("connection_id"):
+ module.fail_json(
+ msg=f"The Direct Connect connection {module.params['connection_id']} does not exist.",
+ )
+
+ if state == "present":
+ changed, connection_id = ensure_present(
+ connection,
+ connection_id=connection_id,
+ connection_name=module.params.get("name"),
+ location=module.params.get("location"),
+ bandwidth=module.params.get("bandwidth"),
+ lag_id=module.params.get("link_aggregation_group"),
+ forced_update=module.params.get("forced_update"),
+ )
response = connection_status(connection, connection_id)
- elif state == 'absent':
+ elif state == "absent":
changed = ensure_absent(connection, connection_id)
response = {}
except DirectConnectError as e:
if e.last_traceback:
- module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
+ module.fail_json(
+ msg=e.msg,
+ exception=e.last_traceback,
+ **camel_dict_to_snake_dict(e.exception.response),
+ )
else:
module.fail_json(msg=e.msg)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
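
ensure_present() above only replaces an existing connection when a property that cannot be modified in place differs and the caller has opted in via forced_update. A sketch of that decision, using the same fields changed_properties() compares (needs_recreate is a hypothetical name):

    def needs_recreate(current_status, location, bandwidth, forced_update):
        # bandwidth and location cannot be changed on an existing Direct
        # Connect connection, so a difference triggers delete-and-recreate
        # only when forced_update=true.
        changed = (current_status["bandwidth"] != bandwidth
                   or current_status["location"] != location)
        return changed and forced_update

    # needs_recreate({"bandwidth": "1Gbps", "location": "EqDC2"}, "EqDC2", "10Gbps", False)
    # -> False: the difference alone does not trigger replacement.
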
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py b/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py
index 1433b387b..b231f0e8f 100644
--- a/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_gateway.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: directconnect_gateway
author:
- Gobin Sougrakpam (@gobins)
@@ -19,10 +17,6 @@ description:
  - Detaches Virtual Gateways from Direct Connect Gateway.
- Prior to release 5.0.0 this module was called C(community.aws.aws_direct_connect_gateway).
The usage did not change.
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
options:
state:
description:
@@ -54,9 +48,13 @@ options:
- How long to wait for the association to be deleted.
type: int
default: 320
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new direct connect gateway attached to virtual private gateway
community.aws.directconnect_gateway:
state: present
@@ -71,9 +69,9 @@ EXAMPLES = '''
name: my-dx-gateway
amazon_asn: 7224
register: created_dxgw
-'''
+"""
-RETURN = '''
+RETURN = r"""
result:
description:
- The attributes of the Direct Connect Gateway
@@ -95,7 +93,7 @@ result:
owner_account:
description: The AWS account ID of the owner of the direct connect gateway.
type: str
-'''
+"""
import time
@@ -106,17 +104,18 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def dx_gateway_info(client, gateway_id, module):
try:
resp = client.describe_direct_connect_gateways(
- directConnectGatewayId=gateway_id)
+ directConnectGatewayId=gateway_id,
+ )
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to fetch gateway information.")
- if resp['directConnectGateways']:
- return resp['directConnectGateways'][0]
+ if resp["directConnectGateways"]:
+ return resp["directConnectGateways"][0]
def wait_for_status(client, module, gateway_id, virtual_gateway_id, status):
@@ -130,9 +129,10 @@ def wait_for_status(client, module, gateway_id, virtual_gateway_id, status):
client,
module,
gateway_id=gateway_id,
- virtual_gateway_id=virtual_gateway_id)
- if response['directConnectGatewayAssociations']:
- if response['directConnectGatewayAssociations'][0]['associationState'] == status:
+ virtual_gateway_id=virtual_gateway_id,
+ )
+ if response["directConnectGatewayAssociations"]:
+ if response["directConnectGatewayAssociations"][0]["associationState"] == status:
status_achieved = True
break
else:
@@ -149,17 +149,18 @@ def wait_for_status(client, module, gateway_id, virtual_gateway_id, status):
def associate_direct_connect_gateway(client, module, gateway_id):
params = dict()
- params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
+ params["virtual_gateway_id"] = module.params.get("virtual_gateway_id")
try:
response = client.create_direct_connect_gateway_association(
directConnectGatewayId=gateway_id,
- virtualGatewayId=params['virtual_gateway_id'])
+ virtualGatewayId=params["virtual_gateway_id"],
+ )
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, 'Failed to associate gateway')
+ module.fail_json_aws(e, "Failed to associate gateway")
- status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating')
+ status_achieved, dxgw = wait_for_status(client, module, gateway_id, params["virtual_gateway_id"], "associating")
if not status_achieved:
- module.fail_json(msg='Error waiting for dxgw to attach to vpg - please check the AWS console')
+ module.fail_json(msg="Error waiting for dxgw to attach to vpg - please check the AWS console")
result = response
return result
@@ -169,13 +170,14 @@ def delete_association(client, module, gateway_id, virtual_gateway_id):
try:
response = client.delete_direct_connect_gateway_association(
directConnectGatewayId=gateway_id,
- virtualGatewayId=virtual_gateway_id)
+ virtualGatewayId=virtual_gateway_id,
+ )
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete gateway association.")
- status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating')
+ status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, "disassociating")
if not status_achieved:
- module.fail_json(msg='Error waiting for dxgw to detach from vpg - please check the AWS console')
+ module.fail_json(msg="Error waiting for dxgw to detach from vpg - please check the AWS console")
result = response
return result
@@ -183,12 +185,13 @@ def delete_association(client, module, gateway_id, virtual_gateway_id):
def create_dx_gateway(client, module):
params = dict()
- params['name'] = module.params.get('name')
- params['amazon_asn'] = module.params.get('amazon_asn')
+ params["name"] = module.params.get("name")
+ params["amazon_asn"] = module.params.get("amazon_asn")
try:
response = client.create_direct_connect_gateway(
- directConnectGatewayName=params['name'],
- amazonSideAsn=int(params['amazon_asn']))
+ directConnectGatewayName=params["name"],
+ amazonSideAsn=int(params["amazon_asn"]),
+ )
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to create direct connect gateway.")
@@ -200,21 +203,21 @@ def find_dx_gateway(client, module, gateway_id=None):
params = dict()
gateways = list()
if gateway_id is not None:
- params['directConnectGatewayId'] = gateway_id
+ params["directConnectGatewayId"] = gateway_id
while True:
try:
resp = client.describe_direct_connect_gateways(**params)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to describe gateways")
- gateways.extend(resp['directConnectGateways'])
- if 'nextToken' in resp:
- params['nextToken'] = resp['nextToken']
+ gateways.extend(resp["directConnectGateways"])
+ if "nextToken" in resp:
+ params["nextToken"] = resp["nextToken"]
else:
break
if gateways != []:
count = 0
for gateway in gateways:
- if module.params.get('name') == gateway['directConnectGatewayName']:
+ if module.params.get("name") == gateway["directConnectGatewayName"]:
count += 1
return gateway
return None
@@ -224,7 +227,7 @@ def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None):
try:
if virtual_gateway_id is None:
resp = client.describe_direct_connect_gateway_associations(
- directConnectGatewayId=gateway_id
+ directConnectGatewayId=gateway_id,
)
else:
resp = client.describe_direct_connect_gateway_associations(
@@ -243,22 +246,20 @@ def ensure_present(client, module):
changed = False
params = dict()
result = dict()
- params['name'] = module.params.get('name')
- params['amazon_asn'] = module.params.get('amazon_asn')
- params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
+ params["name"] = module.params.get("name")
+ params["amazon_asn"] = module.params.get("amazon_asn")
+ params["virtual_gateway_id"] = module.params.get("virtual_gateway_id")
# check if a gateway matching our module args already exists
existing_dxgw = find_dx_gateway(client, module)
- if existing_dxgw is not None and existing_dxgw['directConnectGatewayState'] != 'deleted':
- gateway_id = existing_dxgw['directConnectGatewayId']
+ if existing_dxgw is not None and existing_dxgw["directConnectGatewayState"] != "deleted":
+ gateway_id = existing_dxgw["directConnectGatewayId"]
        # if a gateway_id was provided, check if it is attached to the DXGW
- if params['virtual_gateway_id']:
+ if params["virtual_gateway_id"]:
resp = check_dxgw_association(
- client,
- module,
- gateway_id=gateway_id,
- virtual_gateway_id=params['virtual_gateway_id'])
+ client, module, gateway_id=gateway_id, virtual_gateway_id=params["virtual_gateway_id"]
+ )
if not resp["directConnectGatewayAssociations"]:
# attach the dxgw to the supplied virtual_gateway_id
associate_direct_connect_gateway(client, module, gateway_id)
@@ -269,26 +270,28 @@ def ensure_present(client, module):
resp = check_dxgw_association(client, module, gateway_id=gateway_id)
if resp["directConnectGatewayAssociations"]:
- for association in resp['directConnectGatewayAssociations']:
- if association['associationState'] not in ['disassociating', 'disassociated']:
+ for association in resp["directConnectGatewayAssociations"]:
+ if association["associationState"] not in ["disassociating", "disassociated"]:
delete_association(
client,
module,
gateway_id=gateway_id,
- virtual_gateway_id=association['virtualGatewayId'])
+ virtual_gateway_id=association["virtualGatewayId"],
+ )
else:
# create a new dxgw
new_dxgw = create_dx_gateway(client, module)
changed = True
- gateway_id = new_dxgw['directConnectGateway']['directConnectGatewayId']
+ gateway_id = new_dxgw["directConnectGateway"]["directConnectGatewayId"]
# if a vpc-id was supplied, attempt to attach it to the dxgw
- if params['virtual_gateway_id']:
+ if params["virtual_gateway_id"]:
associate_direct_connect_gateway(client, module, gateway_id)
- resp = check_dxgw_association(client,
- module,
- gateway_id=gateway_id
- )
+ resp = check_dxgw_association(
+ client,
+ module,
+ gateway_id=gateway_id,
+ )
if resp["directConnectGatewayAssociations"]:
changed = True
@@ -302,23 +305,23 @@ def ensure_absent(client, module):
changed = False
result = dict()
- dx_gateway_id = module.params.get('direct_connect_gateway_id')
+ dx_gateway_id = module.params.get("direct_connect_gateway_id")
existing_dxgw = find_dx_gateway(client, module, dx_gateway_id)
if existing_dxgw is not None:
- resp = check_dxgw_association(client, module,
- gateway_id=dx_gateway_id)
+ resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id)
if resp["directConnectGatewayAssociations"]:
- for association in resp['directConnectGatewayAssociations']:
- if association['associationState'] not in ['disassociating', 'disassociated']:
- delete_association(client, module,
- gateway_id=dx_gateway_id,
- virtual_gateway_id=association['virtualGatewayId'])
+ for association in resp["directConnectGatewayAssociations"]:
+ if association["associationState"] not in ["disassociating", "disassociated"]:
+ delete_association(
+ client,
+ module,
+ gateway_id=dx_gateway_id,
+ virtual_gateway_id=association["virtualGatewayId"],
+ )
# wait for deleting association
- timeout = time.time() + module.params.get('wait_timeout')
+ timeout = time.time() + module.params.get("wait_timeout")
while time.time() < timeout:
- resp = check_dxgw_association(client,
- module,
- gateway_id=dx_gateway_id)
+ resp = check_dxgw_association(client, module, gateway_id=dx_gateway_id)
if resp["directConnectGatewayAssociations"] != []:
time.sleep(15)
else:
@@ -326,43 +329,44 @@ def ensure_absent(client, module):
try:
resp = client.delete_direct_connect_gateway(
- directConnectGatewayId=dx_gateway_id
+ directConnectGatewayId=dx_gateway_id,
)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete gateway")
- result = resp['directConnectGateway']
+ result = resp["directConnectGateway"]
return changed
def main():
argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
+ state=dict(default="present", choices=["present", "absent"]),
name=dict(),
amazon_asn=dict(),
virtual_gateway_id=dict(),
direct_connect_gateway_id=dict(),
- wait_timeout=dict(type='int', default=320),
+ wait_timeout=dict(type="int", default=320),
+ )
+ required_if = [("state", "present", ["name", "amazon_asn"]), ("state", "absent", ["direct_connect_gateway_id"])]
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=required_if,
)
- required_if = [('state', 'present', ['name', 'amazon_asn']),
- ('state', 'absent', ['direct_connect_gateway_id'])]
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=required_if)
- state = module.params.get('state')
+ state = module.params.get("state")
try:
- client = module.client('directconnect')
+ client = module.client("directconnect")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- if state == 'present':
+ if state == "present":
(changed, results) = ensure_present(client, module)
- elif state == 'absent':
+ elif state == "absent":
changed = ensure_absent(client, module)
results = {}
module.exit_json(changed=changed, **camel_dict_to_snake_dict(results))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
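
find_dx_gateway() above pages through describe_direct_connect_gateways by hand rather than using a paginator. A self-contained sketch of the same nextToken loop, assuming any boto3 directconnect client:

    def list_all_gateways(client):
        """Follow nextToken until a response no longer carries one."""
        params = {}
        gateways = []
        while True:
            resp = client.describe_direct_connect_gateways(**params)
            gateways.extend(resp["directConnectGateways"])
            if "nextToken" in resp:
                params["nextToken"] = resp["nextToken"]
            else:
                break
        return gateways
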
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py b/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py
index cc7122712..99224fee0 100644
--- a/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_link_aggregation_group.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: directconnect_link_aggregation_group
version_added: 1.0.0
@@ -17,10 +15,6 @@ description:
The usage did not change.
author:
- "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
options:
state:
description:
@@ -81,9 +75,13 @@ options:
- The duration in seconds to wait if I(wait=true).
default: 120
type: int
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = """
+EXAMPLES = r"""
# create a Direct Connect connection
- community.aws.directconnect_link_aggregation_group:
@@ -93,7 +91,7 @@ EXAMPLES = """
bandwidth: 1Gbps
"""
-RETURN = """
+RETURN = r"""
changed:
  type: bool
description: Whether or not the LAG has changed.
@@ -163,8 +161,8 @@ region:
returned: when I(state=present)
"""
-import traceback
import time
+import traceback
try:
import botocore
@@ -173,13 +171,13 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_connection
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def lag_status(client, lag_id):
@@ -187,8 +185,8 @@ def lag_status(client, lag_id):
def lag_exists(client, lag_id=None, lag_name=None, verify=True):
- """ If verify=True, returns the LAG ID or None
- If verify=False, returns the LAG's data (or an empty dict)
+ """If verify=True, returns the LAG ID or None
+ If verify=False, returns the LAG's data (or an empty dict)
"""
try:
if lag_id:
@@ -202,26 +200,24 @@ def lag_exists(client, lag_id=None, lag_name=None, verify=True):
return {}
else:
failed_op = "Failed to describe DirectConnect link aggregation groups."
- raise DirectConnectError(msg=failed_op,
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(msg=failed_op, last_traceback=traceback.format_exc(), exception=e)
match = [] # List of LAG IDs that are exact matches
lag = [] # List of LAG data that are exact matches
# look for matching connections
- if len(response.get('lags', [])) == 1 and lag_id:
- if response['lags'][0]['lagState'] != 'deleted':
- match.append(response['lags'][0]['lagId'])
- lag.append(response['lags'][0])
+ if len(response.get("lags", [])) == 1 and lag_id:
+ if response["lags"][0]["lagState"] != "deleted":
+ match.append(response["lags"][0]["lagId"])
+ lag.append(response["lags"][0])
else:
- for each in response.get('lags', []):
- if each['lagState'] != 'deleted':
+ for each in response.get("lags", []):
+ if each["lagState"] != "deleted":
if not lag_id:
- if lag_name == each['lagName']:
- match.append(each['lagId'])
+ if lag_name == each["lagName"]:
+ match.append(each["lagId"])
else:
- match.append(each['lagId'])
+ match.append(each["lagId"])
    # verifying if the connection exists; if true, return connection identifier, otherwise return False
if verify and len(match) == 1:
@@ -239,36 +235,41 @@ def lag_exists(client, lag_id=None, lag_name=None, verify=True):
def create_lag(client, num_connections, location, bandwidth, name, connection_id):
if not name:
- raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.",
- last_traceback=None,
- exception="")
-
- parameters = dict(numberOfConnections=num_connections,
- location=location,
- connectionsBandwidth=bandwidth,
- lagName=name)
+ raise DirectConnectError(
+ msg="Failed to create a Direct Connect link aggregation group: name required.",
+ last_traceback=None,
+ exception="",
+ )
+
+ parameters = dict(
+ numberOfConnections=num_connections, location=location, connectionsBandwidth=bandwidth, lagName=name
+ )
if connection_id:
parameters.update(connectionId=connection_id)
try:
lag = client.create_lag(**parameters)
except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name),
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(
+ msg=f"Failed to create DirectConnect link aggregation group {name}",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
- return lag['lagId']
+ return lag["lagId"]
def delete_lag(client, lag_id):
try:
client.delete_lag(lagId=lag_id)
except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id),
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(
+ msg=f"Failed to delete Direct Connect link aggregation group {lag_id}.",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
-@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException'])
+@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=["DirectConnectClientException"])
def _update_lag(client, lag_id, lag_name, min_links):
params = {}
if min_links:
@@ -284,10 +285,9 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_
if min_links and min_links > num_connections:
raise DirectConnectError(
- msg="The number of connections {0} must be greater than the minimum number of links "
- "{1} to update the LAG {2}".format(num_connections, min_links, lag_id),
+ msg=f"The number of connections {num_connections} must be greater than the minimum number of links {min_links} to update the LAG {lag_id}",
last_traceback=None,
- exception=None
+ exception=None,
)
while True:
@@ -296,27 +296,29 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_
except botocore.exceptions.ClientError as e:
if wait and time.time() - start <= wait_timeout:
continue
- msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id)
- if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']:
- msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links)
- raise DirectConnectError(msg=msg,
- last_traceback=traceback.format_exc(),
- exception=e)
+ msg = f"Failed to update Direct Connect link aggregation group {lag_id}."
+ if "MinimumLinks cannot be set higher than the number of connections" in e.response["Error"]["Message"]:
+ msg += f"Unable to set the min number of links to {min_links} while the LAG connections are being requested"
+ raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e)
else:
break
def lag_changed(current_status, name, min_links):
- """ Determines if a modifiable link aggregation group attribute has been modified. """
- return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks'])
+ """Determines if a modifiable link aggregation group attribute has been modified."""
+ return (name and name != current_status["lagName"]) or (min_links and min_links != current_status["minimumLinks"])
-def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout):
+def ensure_present(
+ client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout
+):
exists = lag_exists(client, lag_id, lag_name)
if not exists and lag_id:
- raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id),
- last_traceback=None,
- exception="")
+ raise DirectConnectError(
+ msg=f"The Direct Connect link aggregation group {lag_id} does not exist.",
+ last_traceback=None,
+ exception="",
+ )
# the connection is found; get the latest state and see if it needs to be updated
if exists:
@@ -338,27 +340,31 @@ def describe_virtual_interfaces(client, lag_id):
try:
response = client.describe_virtual_interfaces(connectionId=lag_id)
except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id),
- last_traceback=traceback.format_exc(),
- exception=e)
- return response.get('virtualInterfaces', [])
+ raise DirectConnectError(
+ msg=f"Failed to describe any virtual interfaces associated with LAG: {lag_id}",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
+ return response.get("virtualInterfaces", [])
def get_connections_and_virtual_interfaces(client, lag_id):
virtual_interfaces = describe_virtual_interfaces(client, lag_id)
- connections = lag_status(client, lag_id=lag_id).get('connections', [])
+ connections = lag_status(client, lag_id=lag_id).get("connections", [])
return virtual_interfaces, connections
def disassociate_vis(client, lag_id, virtual_interfaces):
for vi in virtual_interfaces:
- delete_virtual_interface(client, vi['virtualInterfaceId'])
+ delete_virtual_interface(client, vi["virtualInterfaceId"])
try:
- response = client.delete_virtual_interface(virtualInterfaceId=vi['virtualInterfaceId'])
+ response = client.delete_virtual_interface(virtualInterfaceId=vi["virtualInterfaceId"])
except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id),
- last_traceback=traceback.format_exc(),
- exception=e)
+ raise DirectConnectError(
+ msg=f"Could not delete virtual interface {vi} to delete link aggregation group {lag_id}.",
+ last_traceback=traceback.format_exc(),
+ exception=e,
+ )
def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout):
@@ -372,32 +378,41 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia
virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id)
# If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete
- if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete:
- raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. "
- "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). "
- "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True "
- "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id),
- last_traceback=None,
- exception=None)
+ if any((latest_status["minimumLinks"], virtual_interfaces, connections)) and not force_delete:
+ raise DirectConnectError(
+ msg=(
+ "There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG"
+ f" {lag_id}. To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces"
+ " they will be deleted). Optionally, to ensure hosted connections are deleted after disassociation use"
+ " delete_with_disassociation: True and wait: True (as Virtual Interfaces may take a few moments to"
+ " delete)"
+ ),
+ last_traceback=None,
+ exception=None,
+ )
# update min_links to be 0 so we can remove the LAG
update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout)
# if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached
for connection in connections:
- disassociate_connection_and_lag(client, connection['connectionId'], lag_id)
+ disassociate_connection_and_lag(client, connection["connectionId"], lag_id)
if delete_with_disassociation:
- delete_connection(client, connection['connectionId'])
+ delete_connection(client, connection["connectionId"])
for vi in virtual_interfaces:
- delete_virtual_interface(client, vi['virtualInterfaceId'])
+ delete_virtual_interface(client, vi["virtualInterfaceId"])
start_time = time.time()
while True:
try:
delete_lag(client, lag_id)
except DirectConnectError as e:
- if ('until its Virtual Interfaces are deleted' in e.exception) and (time.time() - start_time < wait_timeout) and wait:
+ if (
+ ("until its Virtual Interfaces are deleted" in e.exception)
+ and (time.time() - start_time < wait_timeout)
+ and wait
+ ):
continue
else:
return True
@@ -405,54 +420,58 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia
def main():
argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent']),
+ state=dict(required=True, choices=["present", "absent"]),
name=dict(),
link_aggregation_group_id=dict(),
- num_connections=dict(type='int'),
- min_links=dict(type='int'),
+ num_connections=dict(type="int"),
+ min_links=dict(type="int"),
location=dict(),
bandwidth=dict(),
connection_id=dict(),
- delete_with_disassociation=dict(type='bool', default=False),
- force_delete=dict(type='bool', default=False),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=120),
+ delete_with_disassociation=dict(type="bool", default=False),
+ force_delete=dict(type="bool", default=False),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=120),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- required_one_of=[('link_aggregation_group_id', 'name')],
- required_if=[('state', 'present', ('location', 'bandwidth'))],
+ required_one_of=[("link_aggregation_group_id", "name")],
+ required_if=[("state", "present", ("location", "bandwidth"))],
)
try:
- connection = module.client('directconnect')
+ connection = module.client("directconnect")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- state = module.params.get('state')
+ state = module.params.get("state")
response = {}
try:
- if state == 'present':
- changed, lag_id = ensure_present(connection,
- num_connections=module.params.get("num_connections"),
- lag_id=module.params.get("link_aggregation_group_id"),
- lag_name=module.params.get("name"),
- location=module.params.get("location"),
- bandwidth=module.params.get("bandwidth"),
- connection_id=module.params.get("connection_id"),
- min_links=module.params.get("min_links"),
- wait=module.params.get("wait"),
- wait_timeout=module.params.get("wait_timeout"))
+ if state == "present":
+ changed, lag_id = ensure_present(
+ connection,
+ num_connections=module.params.get("num_connections"),
+ lag_id=module.params.get("link_aggregation_group_id"),
+ lag_name=module.params.get("name"),
+ location=module.params.get("location"),
+ bandwidth=module.params.get("bandwidth"),
+ connection_id=module.params.get("connection_id"),
+ min_links=module.params.get("min_links"),
+ wait=module.params.get("wait"),
+ wait_timeout=module.params.get("wait_timeout"),
+ )
response = lag_status(connection, lag_id)
elif state == "absent":
- changed = ensure_absent(connection,
- lag_id=module.params.get("link_aggregation_group_id"),
- lag_name=module.params.get("name"),
- force_delete=module.params.get("force_delete"),
- delete_with_disassociation=module.params.get("delete_with_disassociation"),
- wait=module.params.get('wait'),
- wait_timeout=module.params.get('wait_timeout'))
+ changed = ensure_absent(
+ connection,
+ lag_id=module.params.get("link_aggregation_group_id"),
+ lag_name=module.params.get("name"),
+ force_delete=module.params.get("force_delete"),
+ delete_with_disassociation=module.params.get("delete_with_disassociation"),
+ wait=module.params.get("wait"),
+ wait_timeout=module.params.get("wait_timeout"),
+ )
except DirectConnectError as e:
if e.last_traceback:
module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception))
@@ -462,5 +481,5 @@ def main():
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
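Both exit paths above pass the response through camel_dict_to_snake_dict before returning it to Ansible. A small illustrative sketch of that transformation (the sample keys are hypothetical, but the helper's behavior is as shown):

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

# boto3 returns camelCase keys; Ansible return values use snake_case.
status = {"lagId": "dxlag-ffexample", "minimumLinks": 2, "connectionsBandwidth": "1Gbps"}
print(camel_dict_to_snake_dict(status))
# {'lag_id': 'dxlag-ffexample', 'minimum_links': 2, 'connections_bandwidth': '1Gbps'}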
diff --git a/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py b/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py
index 059cd7425..da76d5737 100644
--- a/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py
+++ b/ansible_collections/community/aws/plugins/modules/directconnect_virtual_interface.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: directconnect_virtual_interface
version_added: 1.0.0
@@ -86,12 +84,12 @@ options:
- The virtual interface ID.
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-RETURN = r'''
+RETURN = r"""
address_family:
description: The address family for the BGP peer.
returned: always
@@ -228,9 +226,9 @@ vlan:
returned: always
type: int
sample: 100
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
---
- name: create an association between a LAG and connection
community.aws.directconnect_virtual_interface:
@@ -244,81 +242,87 @@ EXAMPLES = r'''
state: absent
connection_id: dxcon-XXXXXXXX
virtual_interface_id: dxv-XXXXXXXX
-
-'''
+"""
import traceback
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
# handled by AnsibleAWSModule
pass
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError
from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import delete_virtual_interface
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def try_except_ClientError(failure_msg):
- '''
- Wrapper for boto3 calls that uses AWSRetry and handles exceptions
- '''
+ """
+ Wrapper for boto3 calls that uses AWSRetry and handles exceptions
+ """
+
def wrapper(f):
def run_func(*args, **kwargs):
try:
- result = AWSRetry.jittered_backoff(retries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs)
+ result = AWSRetry.jittered_backoff(
+ retries=8, delay=5, catch_extra_error_codes=["DirectConnectClientException"]
+ )(f)(*args, **kwargs)
except (ClientError, BotoCoreError) as e:
raise DirectConnectError(failure_msg, traceback.format_exc(), e)
return result
+
return run_func
+
return wrapper
def find_unique_vi(client, connection_id, virtual_interface_id, name):
- '''
- Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found.
- If multiple matches are found False is returned. If no matches are found None is returned.
- '''
+ """
+ Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found.
+ If multiple matches are found False is returned. If no matches are found None is returned.
+ """
# Get the virtual interfaces, filtering by the ID if provided.
vi_params = {}
if virtual_interface_id:
- vi_params = {'virtualInterfaceId': virtual_interface_id}
+ vi_params = {"virtualInterfaceId": virtual_interface_id}
- virtual_interfaces = try_except_ClientError(
- failure_msg="Failed to describe virtual interface")(
- client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces')
+ virtual_interfaces = try_except_ClientError(failure_msg="Failed to describe virtual interface")(
+ client.describe_virtual_interfaces
+ )(**vi_params).get("virtualInterfaces")
# Remove deleting/deleted matches from the results.
- virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')]
+ virtual_interfaces = [vi for vi in virtual_interfaces if vi["virtualInterfaceState"] not in ("deleting", "deleted")]
matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id)
return exact_match(matching_virtual_interfaces)
def exact_match(virtual_interfaces):
- '''
- Returns the virtual interface ID if one was found,
- None if the virtual interface ID needs to be created,
- False if an exact match was not found
- '''
+ """
+ Returns the virtual interface ID if one was found,
+ None if the virtual interface ID needs to be created,
+ False if an exact match was not found
+ """
if not virtual_interfaces:
return None
if len(virtual_interfaces) == 1:
- return virtual_interfaces[0]['virtualInterfaceId']
+ return virtual_interfaces[0]["virtualInterfaceId"]
else:
return False
def filter_virtual_interfaces(virtual_interfaces, name, connection_id):
- '''
- Filters the available virtual interfaces to try to find a unique match
- '''
+ """
+ Filters the available virtual interfaces to try to find a unique match
+ """
# Filter by name if provided.
if name:
matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name)
@@ -339,52 +343,56 @@ def filter_virtual_interfaces(virtual_interfaces, name, connection_id):
def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id):
- '''
- Return virtual interfaces that have the connection_id associated
- '''
- return [vi for vi in virtual_interfaces if vi['connectionId'] == connection_id]
+ """
+ Return virtual interfaces that have the connection_id associated
+ """
+ return [vi for vi in virtual_interfaces if vi["connectionId"] == connection_id]
def find_virtual_interface_by_name(virtual_interfaces, name):
- '''
- Return virtual interfaces that match the provided name
- '''
- return [vi for vi in virtual_interfaces if vi['virtualInterfaceName'] == name]
+ """
+ Return virtual interfaces that match the provided name
+ """
+ return [vi for vi in virtual_interfaces if vi["virtualInterfaceName"] == name]
def vi_state(client, virtual_interface_id):
- '''
- Returns the state of the virtual interface.
- '''
- err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id)
- vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(virtualInterfaceId=virtual_interface_id)
- return vi['virtualInterfaces'][0]
+ """
+ Returns the state of the virtual interface.
+ """
+ err_msg = f"Failed to describe virtual interface: {virtual_interface_id}"
+ vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(
+ virtualInterfaceId=virtual_interface_id
+ )
+ return vi["virtualInterfaces"][0]
def assemble_params_for_creating_vi(params):
- '''
- Returns kwargs to use in the call to create the virtual interface
-
- Params for public virtual interfaces:
- virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr
- Params for private virtual interfaces:
- virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId
- '''
-
- public = params['public']
- name = params['name']
- vlan = params['vlan']
- bgp_asn = params['bgp_asn']
- auth_key = params['authentication_key']
- amazon_addr = params['amazon_address']
- customer_addr = params['customer_address']
- family_addr = params['address_type']
- cidr = params['cidr']
- virtual_gateway_id = params['virtual_gateway_id']
- direct_connect_gateway_id = params['direct_connect_gateway_id']
+ """
+ Returns kwargs to use in the call to create the virtual interface
+
+ Params for public virtual interfaces:
+ virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr
+ Params for private virtual interfaces:
+ virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId
+ """
+
+ public = params["public"]
+ name = params["name"]
+ vlan = params["vlan"]
+ bgp_asn = params["bgp_asn"]
+ auth_key = params["authentication_key"]
+ amazon_addr = params["amazon_address"]
+ customer_addr = params["customer_address"]
+ family_addr = params["address_type"]
+ cidr = params["cidr"]
+ virtual_gateway_id = params["virtual_gateway_id"]
+ direct_connect_gateway_id = params["direct_connect_gateway_id"]
parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn)
- opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr)
+ opt_params = dict(
+ authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr
+ )
for name, value in opt_params.items():
if value:
@@ -392,68 +400,74 @@ def assemble_params_for_creating_vi(params):
# virtual interface type specific parameters
if public and cidr:
- parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr]
+ parameters["routeFilterPrefixes"] = [{"cidr": c} for c in cidr]
if not public:
if virtual_gateway_id:
- parameters['virtualGatewayId'] = virtual_gateway_id
+ parameters["virtualGatewayId"] = virtual_gateway_id
elif direct_connect_gateway_id:
- parameters['directConnectGatewayId'] = direct_connect_gateway_id
+ parameters["directConnectGatewayId"] = direct_connect_gateway_id
return parameters
def create_vi(client, public, associated_id, creation_params):
- '''
- :param public: a boolean
- :param associated_id: a link aggregation group ID or connection ID to associate
- with the virtual interface.
- :param creation_params: a dict of parameters to use in the AWS SDK call
- :return The ID of the created virtual interface
- '''
+ """
+ :param public: a boolean
+ :param associated_id: a link aggregation group ID or connection ID to associate
+ with the virtual interface.
+ :param creation_params: a dict of parameters to use in the AWS SDK call
+    :return: The ID of the created virtual interface
+ """
err_msg = "Failed to create virtual interface"
if public:
- vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(connectionId=associated_id,
- newPublicVirtualInterface=creation_params)
+ vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(
+ connectionId=associated_id, newPublicVirtualInterface=creation_params
+ )
else:
- vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(connectionId=associated_id,
- newPrivateVirtualInterface=creation_params)
- return vi['virtualInterfaceId']
+ vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(
+ connectionId=associated_id, newPrivateVirtualInterface=creation_params
+ )
+ return vi["virtualInterfaceId"]
def modify_vi(client, virtual_interface_id, connection_id):
- '''
- Associate a new connection ID
- '''
- err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id)
- try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(virtualInterfaceId=virtual_interface_id,
- connectionId=connection_id)
+ """
+ Associate a new connection ID
+ """
+ err_msg = f"Unable to associate {connection_id} with virtual interface {virtual_interface_id}"
+ try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(
+ virtualInterfaceId=virtual_interface_id, connectionId=connection_id
+ )
def needs_modification(client, virtual_interface_id, connection_id):
- '''
- Determine if the associated connection ID needs to be updated
- '''
- return vi_state(client, virtual_interface_id).get('connectionId') != connection_id
+ """
+ Determine if the associated connection ID needs to be updated
+ """
+ return vi_state(client, virtual_interface_id).get("connectionId") != connection_id
def ensure_state(connection, module):
changed = False
- state = module.params['state']
- connection_id = module.params['id_to_associate']
- public = module.params['public']
- name = module.params['name']
+ state = module.params["state"]
+ connection_id = module.params["id_to_associate"]
+ public = module.params["public"]
+ name = module.params["name"]
- virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name)
+ virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get("virtual_interface_id"), name)
if virtual_interface_id is False:
- module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, "
- "and connection_id options if applicable to find a unique match.")
-
- if state == 'present':
+ module.fail_json(
+ msg=(
+ "Multiple virtual interfaces were found. Use the virtual_interface_id, name, "
+ "and connection_id options if applicable to find a unique match."
+ )
+ )
- if not virtual_interface_id and module.params['virtual_interface_id']:
- module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id']))
+ if state == "present":
+ if not virtual_interface_id and module.params["virtual_interface_id"]:
+ module.fail_json(msg=f"The virtual interface {module.params['virtual_interface_id']} does not exist.")
elif not virtual_interface_id:
assembled_params = assemble_params_for_creating_vi(module.params)
@@ -478,31 +492,35 @@ def ensure_state(connection, module):
def main():
argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent']),
- id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']),
- public=dict(type='bool'),
+ state=dict(required=True, choices=["present", "absent"]),
+ id_to_associate=dict(required=True, aliases=["link_aggregation_group_id", "connection_id"]),
+ public=dict(type="bool"),
name=dict(),
- vlan=dict(type='int', default=100),
- bgp_asn=dict(type='int', default=65000),
+ vlan=dict(type="int", default=100),
+ bgp_asn=dict(type="int", default=65000),
authentication_key=dict(no_log=True),
amazon_address=dict(),
customer_address=dict(),
address_type=dict(),
- cidr=dict(type='list', elements='str'),
+ cidr=dict(type="list", elements="str"),
virtual_gateway_id=dict(),
direct_connect_gateway_id=dict(),
- virtual_interface_id=dict()
+ virtual_interface_id=dict(),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_one_of=[['virtual_interface_id', 'name']],
- required_if=[['state', 'present', ['public']],
- ['public', True, ['amazon_address']],
- ['public', True, ['customer_address']],
- ['public', True, ['cidr']]],
- mutually_exclusive=[['virtual_gateway_id', 'direct_connect_gateway_id']])
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=[["virtual_interface_id", "name"]],
+ required_if=[
+ ["state", "present", ["public"]],
+ ["public", True, ["amazon_address"]],
+ ["public", True, ["customer_address"]],
+ ["public", True, ["cidr"]],
+ ],
+ mutually_exclusive=[["virtual_gateway_id", "direct_connect_gateway_id"]],
+ )
- connection = module.client('directconnect')
+ connection = module.client("directconnect")
try:
changed, latest_state = ensure_state(connection, module)
@@ -515,5 +533,5 @@ def main():
module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
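The try_except_ClientError helper reformatted above is a decorator factory: it takes a failure message and returns a decorator that runs the wrapped boto3 call and converts client errors into DirectConnectError. A simplified stand-alone sketch of the pattern (the real module additionally applies AWSRetry.jittered_backoff inside the wrapper and catches only botocore's ClientError/BotoCoreError):

import traceback


class DirectConnectError(Exception):
    # Stand-in for the collection's DirectConnectError, which carries a
    # message, the formatted traceback, and the original exception.
    def __init__(self, msg, last_traceback=None, exception=None):
        super().__init__(msg)
        self.msg = msg
        self.last_traceback = last_traceback
        self.exception = exception


def try_except_client_error(failure_msg):
    def wrapper(f):
        def run_func(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception as e:
                raise DirectConnectError(failure_msg, traceback.format_exc(), e)

        return run_func

    return wrapper


@try_except_client_error("Failed to describe virtual interface")
def describe_virtual_interfaces(client, **params):
    return client.describe_virtual_interfaces(**params)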
diff --git a/ansible_collections/community/aws/plugins/modules/dms_endpoint.py b/ansible_collections/community/aws/plugins/modules/dms_endpoint.py
index fb899d669..f67a1263e 100644
--- a/ansible_collections/community/aws/plugins/modules/dms_endpoint.py
+++ b/ansible_collections/community/aws/plugins/modules/dms_endpoint.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# -*- coding: utf-8 -*-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: dms_endpoint
version_added: 1.0.0
@@ -143,13 +141,13 @@ options:
author:
- "Rui Moreira (@ruimoreira)"
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
- amazon.aws.tags
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details
- name: Endpoint Creation
community.aws.dms_endpoint:
@@ -164,9 +162,9 @@ EXAMPLES = '''
databasename: 'testdb'
sslmode: none
wait: false
-'''
+"""
-RETURN = '''
+RETURN = r"""
endpoint:
description:
- A description of the DMS endpoint.
@@ -325,7 +323,7 @@ endpoint:
- Additional settings for Redis endpoints.
type: dict
returned: when the I(endpoint_type) is C(redshift)
-'''
+"""
try:
import botocore
@@ -334,20 +332,21 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
backoff_params = dict(retries=5, delay=1, backoff=1.5)
@AWSRetry.jittered_backoff(**backoff_params)
def dms_describe_tags(connection, **params):
- """ checks if the endpoint exists """
- tags = connection.list_tags_for_resource(**params).get('TagList', [])
+ """checks if the endpoint exists"""
+ tags = connection.list_tags_for_resource(**params).get("TagList", [])
return boto3_tag_list_to_ansible_dict(tags)
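dms_describe_tags feeds the raw TagList through boto3_tag_list_to_ansible_dict, which flattens boto3's list of Key/Value dicts into a plain mapping. A minimal sketch of the conversion:

from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict

tag_list = [{"Key": "env", "Value": "dev"}, {"Key": "team", "Value": "data"}]
print(boto3_tag_list_to_ansible_dict(tag_list))
# {'env': 'dev', 'team': 'data'}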
@@ -355,15 +354,14 @@ def dms_describe_tags(connection, **params):
def dms_describe_endpoints(connection, **params):
try:
endpoints = connection.describe_endpoints(**params)
- except is_boto3_error_code('ResourceNotFoundFault'):
+ except is_boto3_error_code("ResourceNotFoundFault"):
return None
- return endpoints.get('Endpoints', None)
+ return endpoints.get("Endpoints", None)
def describe_endpoint(connection, endpoint_identifier):
- """ checks if the endpoint exists """
- endpoint_filter = dict(Name='endpoint-id',
- Values=[endpoint_identifier])
+ """checks if the endpoint exists"""
+ endpoint_filter = dict(Name="endpoint-id", Values=[endpoint_identifier])
try:
endpoints = dms_describe_endpoints(connection, Filters=[endpoint_filter])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
@@ -374,8 +372,8 @@ def describe_endpoint(connection, endpoint_identifier):
endpoint = endpoints[0]
try:
- tags = dms_describe_tags(connection, ResourceArn=endpoint['EndpointArn'])
- endpoint['tags'] = tags
+ tags = dms_describe_tags(connection, ResourceArn=endpoint["EndpointArn"])
+ endpoint["tags"] = tags
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe the DMS endpoint tags")
return endpoint
@@ -384,7 +382,7 @@ def describe_endpoint(connection, endpoint_identifier):
@AWSRetry.jittered_backoff(**backoff_params)
def dms_delete_endpoint(client, **params):
"""deletes the DMS endpoint based on the EndpointArn"""
- if module.params.get('wait'):
+ if module.params.get("wait"):
return delete_dms_endpoint(client)
else:
return client.delete_endpoint(**params)
@@ -392,19 +390,19 @@ def dms_delete_endpoint(client, **params):
@AWSRetry.jittered_backoff(**backoff_params)
def dms_create_endpoint(client, **params):
- """ creates the DMS endpoint"""
+ """creates the DMS endpoint"""
return client.create_endpoint(**params)
@AWSRetry.jittered_backoff(**backoff_params)
def dms_modify_endpoint(client, **params):
- """ updates the endpoint"""
+ """updates the endpoint"""
return client.modify_endpoint(**params)
@AWSRetry.jittered_backoff(**backoff_params)
def get_endpoint_deleted_waiter(client):
- return client.get_waiter('endpoint_deleted')
+ return client.get_waiter("endpoint_deleted")
@AWSRetry.jittered_backoff(**backoff_params)
@@ -418,32 +416,22 @@ def dms_add_tags(client, **params):
def endpoint_exists(endpoint):
- """ Returns boolean based on the existence of the endpoint
+ """Returns boolean based on the existence of the endpoint
:param endpoint: dict containing the described endpoint
:return: bool
"""
- return bool(len(endpoint['Endpoints']))
+ return bool(len(endpoint["Endpoints"]))
def delete_dms_endpoint(connection, endpoint_arn):
try:
- delete_arn = dict(
- EndpointArn=endpoint_arn
- )
- if module.params.get('wait'):
-
+ delete_arn = dict(EndpointArn=endpoint_arn)
+ if module.params.get("wait"):
delete_output = connection.delete_endpoint(**delete_arn)
delete_waiter = get_endpoint_deleted_waiter(connection)
delete_waiter.wait(
- Filters=[{
- 'Name': 'endpoint-arn',
- 'Values': [endpoint_arn]
-
- }],
- WaiterConfig={
- 'Delay': module.params.get('timeout'),
- 'MaxAttempts': module.params.get('retries')
- }
+ Filters=[{"Name": "endpoint-arn", "Values": [endpoint_arn]}],
+ WaiterConfig={"Delay": module.params.get("timeout"), "MaxAttempts": module.params.get("retries")},
)
return delete_output
else:
@@ -458,71 +446,62 @@ def create_module_params():
:return: dict
"""
endpoint_parameters = dict(
- EndpointIdentifier=module.params.get('endpointidentifier'),
- EndpointType=module.params.get('endpointtype'),
- EngineName=module.params.get('enginename'),
- Username=module.params.get('username'),
- Password=module.params.get('password'),
- ServerName=module.params.get('servername'),
- Port=module.params.get('port'),
- DatabaseName=module.params.get('databasename'),
- SslMode=module.params.get('sslmode')
+ EndpointIdentifier=module.params.get("endpointidentifier"),
+ EndpointType=module.params.get("endpointtype"),
+ EngineName=module.params.get("enginename"),
+ Username=module.params.get("username"),
+ Password=module.params.get("password"),
+ ServerName=module.params.get("servername"),
+ Port=module.params.get("port"),
+ DatabaseName=module.params.get("databasename"),
+ SslMode=module.params.get("sslmode"),
)
- if module.params.get('EndpointArn'):
- endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn')
- if module.params.get('certificatearn'):
- endpoint_parameters['CertificateArn'] = \
- module.params.get('certificatearn')
+ if module.params.get("EndpointArn"):
+ endpoint_parameters["EndpointArn"] = module.params.get("EndpointArn")
+ if module.params.get("certificatearn"):
+ endpoint_parameters["CertificateArn"] = module.params.get("certificatearn")
- if module.params.get('dmstransfersettings'):
- endpoint_parameters['DmsTransferSettings'] = \
- module.params.get('dmstransfersettings')
+ if module.params.get("dmstransfersettings"):
+ endpoint_parameters["DmsTransferSettings"] = module.params.get("dmstransfersettings")
- if module.params.get('extraconnectionattributes'):
- endpoint_parameters['ExtraConnectionAttributes'] =\
- module.params.get('extraconnectionattributes')
+ if module.params.get("extraconnectionattributes"):
+ endpoint_parameters["ExtraConnectionAttributes"] = module.params.get("extraconnectionattributes")
- if module.params.get('kmskeyid'):
- endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid')
+ if module.params.get("kmskeyid"):
+ endpoint_parameters["KmsKeyId"] = module.params.get("kmskeyid")
- if module.params.get('tags'):
- endpoint_parameters['Tags'] = module.params.get('tags')
+ if module.params.get("tags"):
+ endpoint_parameters["Tags"] = module.params.get("tags")
- if module.params.get('serviceaccessrolearn'):
- endpoint_parameters['ServiceAccessRoleArn'] = \
- module.params.get('serviceaccessrolearn')
+ if module.params.get("serviceaccessrolearn"):
+ endpoint_parameters["ServiceAccessRoleArn"] = module.params.get("serviceaccessrolearn")
- if module.params.get('externaltabledefinition'):
- endpoint_parameters['ExternalTableDefinition'] = \
- module.params.get('externaltabledefinition')
+ if module.params.get("externaltabledefinition"):
+ endpoint_parameters["ExternalTableDefinition"] = module.params.get("externaltabledefinition")
- if module.params.get('dynamodbsettings'):
- endpoint_parameters['DynamoDbSettings'] = \
- module.params.get('dynamodbsettings')
+ if module.params.get("dynamodbsettings"):
+ endpoint_parameters["DynamoDbSettings"] = module.params.get("dynamodbsettings")
- if module.params.get('s3settings'):
- endpoint_parameters['S3Settings'] = module.params.get('s3settings')
+ if module.params.get("s3settings"):
+ endpoint_parameters["S3Settings"] = module.params.get("s3settings")
- if module.params.get('mongodbsettings'):
- endpoint_parameters['MongoDbSettings'] = \
- module.params.get('mongodbsettings')
+ if module.params.get("mongodbsettings"):
+ endpoint_parameters["MongoDbSettings"] = module.params.get("mongodbsettings")
- if module.params.get('kinesissettings'):
- endpoint_parameters['KinesisSettings'] = \
- module.params.get('kinesissettings')
+ if module.params.get("kinesissettings"):
+ endpoint_parameters["KinesisSettings"] = module.params.get("kinesissettings")
- if module.params.get('elasticsearchsettings'):
- endpoint_parameters['ElasticsearchSettings'] = \
- module.params.get('elasticsearchsettings')
+ if module.params.get("elasticsearchsettings"):
+ endpoint_parameters["ElasticsearchSettings"] = module.params.get("elasticsearchsettings")
- if module.params.get('wait'):
- endpoint_parameters['wait'] = module.boolean(module.params.get('wait'))
+ if module.params.get("wait"):
+ endpoint_parameters["wait"] = module.boolean(module.params.get("wait"))
- if module.params.get('timeout'):
- endpoint_parameters['timeout'] = module.params.get('timeout')
+ if module.params.get("timeout"):
+ endpoint_parameters["timeout"] = module.params.get("timeout")
- if module.params.get('retries'):
- endpoint_parameters['retries'] = module.params.get('retries')
+ if module.params.get("retries"):
+ endpoint_parameters["retries"] = module.params.get("retries")
return endpoint_parameters
@@ -538,14 +517,16 @@ def compare_params(param_described):
param_described = dict(param_described)
modparams = create_module_params()
# modify can't update tags
- param_described.pop('Tags', None)
- modparams.pop('Tags', None)
+ param_described.pop("Tags", None)
+ modparams.pop("Tags", None)
changed = False
for paramname in modparams:
- if paramname == 'Password' or paramname in param_described \
- and param_described[paramname] == modparams[paramname] or \
- str(param_described[paramname]).lower() \
- == modparams[paramname]:
+ if (
+ paramname == "Password"
+ or paramname in param_described
+ and param_described[paramname] == modparams[paramname]
+ or str(param_described[paramname]).lower() == modparams[paramname]
+ ):
pass
else:
changed = True
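The reformatting above only adds parentheses for layout; it does not change how the condition groups. In Python, and binds tighter than or, so the expression still parses as (paramname == "Password") or (paramname in param_described and param_described[paramname] == modparams[paramname]) or (str(param_described[paramname]).lower() == modparams[paramname]), exactly as before. A quick demonstration:

# 'and' binds tighter than 'or'
a, b, c, d = False, True, True, False
assert (a or b and c or d) == (a or (b and c) or d)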
@@ -553,25 +534,24 @@ def compare_params(param_described):
def modify_dms_endpoint(connection, endpoint):
- arn = endpoint['EndpointArn']
+ arn = endpoint["EndpointArn"]
try:
params = create_module_params()
# modify can't update tags
- params.pop('Tags', None)
+ params.pop("Tags", None)
return dms_modify_endpoint(connection, EndpointArn=arn, **params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to update DMS endpoint.", params=params)
def ensure_tags(connection, endpoint):
- desired_tags = module.params.get('tags', None)
+ desired_tags = module.params.get("tags", None)
if desired_tags is None:
return False
- current_tags = endpoint.get('tags', {})
+ current_tags = endpoint.get("tags", {})
- tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags,
- module.params.get('purge_tags'))
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, module.params.get("purge_tags"))
if not tags_to_remove and not tags_to_add:
return False
@@ -579,7 +559,7 @@ def ensure_tags(connection, endpoint):
if module.check_mode:
return True
- arn = endpoint.get('EndpointArn')
+ arn = endpoint.get("EndpointArn")
try:
if tags_to_remove:
@@ -609,36 +589,49 @@ def create_dms_endpoint(connection):
def main():
argument_spec = dict(
- state=dict(choices=['present', 'absent'], default='present'),
+ state=dict(choices=["present", "absent"], default="present"),
endpointidentifier=dict(required=True),
- endpointtype=dict(choices=['source', 'target']),
- enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb',
- 'aurora', 'redshift', 's3', 'db2', 'azuredb',
- 'sybase', 'dynamodb', 'mongodb', 'sqlserver'],
- required=False),
+ endpointtype=dict(choices=["source", "target"]),
+ enginename=dict(
+ choices=[
+ "mysql",
+ "oracle",
+ "postgres",
+ "mariadb",
+ "aurora",
+ "redshift",
+ "s3",
+ "db2",
+ "azuredb",
+ "sybase",
+ "dynamodb",
+ "mongodb",
+ "sqlserver",
+ ],
+ required=False,
+ ),
username=dict(),
password=dict(no_log=True),
servername=dict(),
- port=dict(type='int'),
+ port=dict(type="int"),
databasename=dict(),
extraconnectionattributes=dict(),
kmskeyid=dict(no_log=False),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
certificatearn=dict(),
- sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'],
- default='none'),
+ sslmode=dict(choices=["none", "require", "verify-ca", "verify-full"], default="none"),
serviceaccessrolearn=dict(),
externaltabledefinition=dict(),
- dynamodbsettings=dict(type='dict'),
- s3settings=dict(type='dict'),
- dmstransfersettings=dict(type='dict'),
- mongodbsettings=dict(type='dict'),
- kinesissettings=dict(type='dict'),
- elasticsearchsettings=dict(type='dict'),
- wait=dict(type='bool', default=False),
- timeout=dict(type='int'),
- retries=dict(type='int')
+ dynamodbsettings=dict(type="dict"),
+ s3settings=dict(type="dict"),
+ dmstransfersettings=dict(type="dict"),
+ mongodbsettings=dict(type="dict"),
+ kinesissettings=dict(type="dict"),
+ elasticsearchsettings=dict(type="dict"),
+ wait=dict(type="bool", default=False),
+ timeout=dict(type="int"),
+ retries=dict(type="int"),
)
global module
module = AnsibleAWSModule(
@@ -650,49 +643,48 @@ def main():
["wait", "True", ["timeout"]],
["wait", "True", ["retries"]],
],
- supports_check_mode=False
+ supports_check_mode=False,
)
exit_message = None
changed = False
- state = module.params.get('state')
+ state = module.params.get("state")
- dmsclient = module.client('dms')
- endpoint = describe_endpoint(dmsclient,
- module.params.get('endpointidentifier'))
- if state == 'present':
+ dmsclient = module.client("dms")
+ endpoint = describe_endpoint(dmsclient, module.params.get("endpointidentifier"))
+ if state == "present":
if endpoint:
changed |= ensure_tags(dmsclient, endpoint)
params_changed = compare_params(endpoint)
if params_changed:
updated_dms = modify_dms_endpoint(dmsclient, endpoint)
exit_message = updated_dms
- endpoint = exit_message.get('Endpoint')
+ endpoint = exit_message.get("Endpoint")
changed = True
else:
exit_message = "Endpoint Already Exists"
else:
exit_message = create_dms_endpoint(dmsclient)
- endpoint = exit_message.get('Endpoint')
+ endpoint = exit_message.get("Endpoint")
changed = True
if changed:
# modify and create don't return tags
- tags = dms_describe_tags(dmsclient, ResourceArn=endpoint['EndpointArn'])
- endpoint['tags'] = tags
- elif state == 'absent':
+ tags = dms_describe_tags(dmsclient, ResourceArn=endpoint["EndpointArn"])
+ endpoint["tags"] = tags
+ elif state == "absent":
if endpoint:
- delete_results = delete_dms_endpoint(dmsclient, endpoint['EndpointArn'])
+ delete_results = delete_dms_endpoint(dmsclient, endpoint["EndpointArn"])
exit_message = delete_results
endpoint = None
changed = True
else:
changed = False
- exit_message = 'DMS Endpoint does not exist'
+ exit_message = "DMS Endpoint does not exist"
- endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=['tags'])
+ endpoint = camel_dict_to_snake_dict(endpoint or {}, ignore_list=["tags"])
module.exit_json(changed=changed, endpoint=endpoint, msg=exit_message)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
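get_endpoint_deleted_waiter above wraps a standard boto3 waiter; the wait call in delete_dms_endpoint is roughly equivalent to the following minimal sketch (the ARN is a placeholder, and the Delay/MaxAttempts values come from the module's timeout/retries parameters):

import boto3

client = boto3.client("dms")
waiter = client.get_waiter("endpoint_deleted")
# Poll describe_endpoints with the given filter until the endpoint is gone,
# failing after roughly Delay * MaxAttempts seconds.
waiter.wait(
    Filters=[{"Name": "endpoint-arn", "Values": ["arn:aws:dms:..."]}],
    WaiterConfig={"Delay": 30, "MaxAttempts": 60},
)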
diff --git a/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py b/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py
index fb5d59613..772a54aa1 100644
--- a/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py
+++ b/ansible_collections/community/aws/plugins/modules/dms_replication_subnet_group.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: dms_replication_subnet_group
version_added: 1.0.0
@@ -43,29 +41,29 @@ options:
author:
- "Rui Moreira (@ruimoreira)"
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- community.aws.dms_replication_subnet_group:
state: present
identifier: "dev-sngroup"
description: "Development Subnet Group asdasdas"
- subnet_ids: ['subnet-id1','subnet-id2']
-'''
+ subnet_ids: ['subnet-id1', 'subnet-id2']
+"""
-RETURN = ''' # '''
+RETURN = r""" # """
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
backoff_params = dict(retries=5, delay=1, backoff=1.5)
@@ -74,16 +72,15 @@ backoff_params = dict(retries=5, delay=1, backoff=1.5)
def describe_subnet_group(connection, subnet_group):
"""checks if instance exists"""
try:
- subnet_group_filter = dict(Name='replication-subnet-group-id',
- Values=[subnet_group])
+ subnet_group_filter = dict(Name="replication-subnet-group-id", Values=[subnet_group])
return connection.describe_replication_subnet_groups(Filters=[subnet_group_filter])
except botocore.exceptions.ClientError:
- return {'ReplicationSubnetGroups': []}
+ return {"ReplicationSubnetGroups": []}
@AWSRetry.jittered_backoff(**backoff_params)
def replication_subnet_group_create(connection, **params):
- """ creates the replication subnet group """
+ """creates the replication subnet group"""
return connection.create_replication_subnet_group(**params)
@@ -94,17 +91,17 @@ def replication_subnet_group_modify(connection, **modify_params):
@AWSRetry.jittered_backoff(**backoff_params)
def replication_subnet_group_delete(module, connection):
- subnetid = module.params.get('identifier')
+ subnetid = module.params.get("identifier")
delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid)
return connection.delete_replication_subnet_group(**delete_parameters)
def replication_subnet_exists(subnet):
- """ Returns boolean based on the existence of the endpoint
+ """Returns boolean based on the existence of the endpoint
:param endpoint: dict containing the described endpoint
:return: bool
"""
- return bool(len(subnet['ReplicationSubnetGroups']))
+ return bool(len(subnet["ReplicationSubnetGroups"]))
def create_module_params(module):
@@ -114,9 +111,9 @@ def create_module_params(module):
"""
instance_parameters = dict(
# ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API
- ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(),
- ReplicationSubnetGroupDescription=module.params.get('description'),
- SubnetIds=module.params.get('subnet_ids'),
+ ReplicationSubnetGroupIdentifier=module.params.get("identifier").lower(),
+ ReplicationSubnetGroupDescription=module.params.get("description"),
+ SubnetIds=module.params.get("subnet_ids"),
)
return instance_parameters
@@ -133,19 +130,18 @@ def compare_params(module, param_described):
modparams = create_module_params(module)
changed = False
# need to sanitize values that get returned from the API
- if 'VpcId' in param_described.keys():
- param_described.pop('VpcId')
- if 'SubnetGroupStatus' in param_described.keys():
- param_described.pop('SubnetGroupStatus')
+ if "VpcId" in param_described.keys():
+ param_described.pop("VpcId")
+ if "SubnetGroupStatus" in param_described.keys():
+ param_described.pop("SubnetGroupStatus")
for paramname in modparams.keys():
- if paramname in param_described.keys() and \
- param_described.get(paramname) == modparams[paramname]:
+ if paramname in param_described.keys() and param_described.get(paramname) == modparams[paramname]:
pass
- elif paramname == 'SubnetIds':
+ elif paramname == "SubnetIds":
subnets = []
- for subnet in param_described.get('Subnets'):
- subnets.append(subnet.get('SubnetIdentifier'))
- for modulesubnet in modparams['SubnetIds']:
+ for subnet in param_described.get("Subnets"):
+ subnets.append(subnet.get("SubnetIdentifier"))
+ for modulesubnet in modparams["SubnetIds"]:
if modulesubnet in subnets:
pass
else:
@@ -171,23 +167,19 @@ def modify_replication_subnet_group(module, connection):
def main():
argument_spec = dict(
- state=dict(type='str', choices=['present', 'absent'], default='present'),
- identifier=dict(type='str', required=True),
- description=dict(type='str', required=True),
- subnet_ids=dict(type='list', elements='str', required=True),
- )
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
+ state=dict(type="str", choices=["present", "absent"], default="present"),
+ identifier=dict(type="str", required=True),
+ description=dict(type="str", required=True),
+ subnet_ids=dict(type="list", elements="str", required=True),
)
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
exit_message = None
changed = False
- state = module.params.get('state')
- dmsclient = module.client('dms')
- subnet_group = describe_subnet_group(dmsclient,
- module.params.get('identifier'))
- if state == 'present':
+ state = module.params.get("state")
+ dmsclient = module.client("dms")
+ subnet_group = describe_subnet_group(dmsclient, module.params.get("identifier"))
+ if state == "present":
if replication_subnet_exists(subnet_group):
if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]):
if not module.check_mode:
@@ -204,7 +196,7 @@ def main():
else:
exit_message = "Check mode enabled"
- elif state == 'absent':
+ elif state == "absent":
if replication_subnet_exists(subnet_group):
if not module.check_mode:
replication_subnet_group_delete(module, dmsclient)
@@ -221,5 +213,5 @@ def main():
module.exit_json(changed=changed, msg=exit_message)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
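The describe/filter pattern used by describe_subnet_group and replication_subnet_exists above boils down to a simple existence check. A stand-alone sketch under the same assumptions (the identifier value is illustrative):

import boto3
import botocore.exceptions


def subnet_group_exists(client, identifier):
    # An empty ReplicationSubnetGroups list (or a client error, which the
    # module also swallows) is treated as "the group does not exist".
    group_filter = {"Name": "replication-subnet-group-id", "Values": [identifier]}
    try:
        response = client.describe_replication_subnet_groups(Filters=[group_filter])
    except botocore.exceptions.ClientError:
        return False
    return bool(response["ReplicationSubnetGroups"])


client = boto3.client("dms")
print(subnet_group_exists(client, "dev-sngroup"))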
diff --git a/ansible_collections/community/aws/plugins/modules/dynamodb_table.py b/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
index 28d334fc9..86ba2f05e 100644
--- a/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
+++ b/ansible_collections/community/aws/plugins/modules/dynamodb_table.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: dynamodb_table
version_added: 1.0.0
@@ -125,30 +123,31 @@ options:
table_class:
description:
- The class of the table.
- - Requires at least botocore version 1.23.18.
choices: ['STANDARD', 'STANDARD_INFREQUENT_ACCESS']
type: str
version_added: 3.1.0
wait_timeout:
description:
- How long (in seconds) to wait for creation / update / deletion to complete.
+      - AWS only allows secondary indexes to be updated one at a time; this module will automatically update them
+        in serial, and the timeout will be applied separately to each index.
aliases: ['wait_for_active_timeout']
- default: 300
+ default: 900
type: int
wait:
description:
- When I(wait=True) the module will wait for up to I(wait_timeout) seconds
- for table creation or deletion to complete before returning.
+ for index updates, table creation or deletion to complete before returning.
default: True
type: bool
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create dynamo table with hash and range primary key
community.aws.dynamodb_table:
name: my-table
@@ -197,9 +196,9 @@ EXAMPLES = r'''
name: my-table
region: us-east-1
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
table:
description: The returned table params from the describe API call.
returned: success
@@ -243,29 +242,39 @@ table_status:
returned: success
type: str
sample: ACTIVE
-'''
+"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-
-
-DYNAMO_TYPE_DEFAULT = 'STRING'
-INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
-INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
-INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_indexes_active
+from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_table_exists
+from ansible_collections.community.aws.plugins.module_utils.dynamodb import wait_table_not_exists
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
+DYNAMO_TYPE_DEFAULT = "STRING"
+INDEX_REQUIRED_OPTIONS = ["name", "type", "hash_key_name"]
+INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + [
+ "hash_key_type",
+ "range_key_name",
+ "range_key_type",
+ "includes",
+ "read_capacity",
+ "write_capacity",
+]
+INDEX_TYPE_OPTIONS = ["all", "global_all", "global_include", "global_keys_only", "include", "keys_only"]
# Map in both directions
-DYNAMO_TYPE_MAP_LONG = {'STRING': 'S', 'NUMBER': 'N', 'BINARY': 'B'}
+DYNAMO_TYPE_MAP_LONG = {"STRING": "S", "NUMBER": "N", "BINARY": "B"}
DYNAMO_TYPE_MAP_SHORT = dict((v, k) for k, v in DYNAMO_TYPE_MAP_LONG.items())
KEY_TYPE_CHOICES = list(DYNAMO_TYPE_MAP_LONG.keys())
@@ -274,58 +283,43 @@ KEY_TYPE_CHOICES = list(DYNAMO_TYPE_MAP_LONG.keys())
# LimitExceededException/ResourceInUseException exceptions at you. This can be
# pretty slow, so add plenty of retries...
@AWSRetry.jittered_backoff(
- retries=45, delay=5, max_delay=30,
- catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'],
+ retries=45,
+ delay=5,
+ max_delay=30,
+ catch_extra_error_codes=["ResourceInUseException", "ResourceNotFoundException"],
)
def _update_table_with_long_retry(**changes):
- return client.update_table(
- TableName=module.params.get('name'),
- **changes
- )
+ return client.update_table(TableName=module.params.get("name"), **changes)
# ResourceNotFoundException is expected here if the table doesn't exist
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException'])
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"])
def _describe_table(**params):
return client.describe_table(**params)
def wait_exists():
- table_name = module.params.get('name')
- wait_timeout = module.params.get('wait_timeout')
-
- delay = min(wait_timeout, 5)
- max_attempts = wait_timeout // delay
-
- try:
- waiter = client.get_waiter('table_exists')
- waiter.wait(
- WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts},
- TableName=table_name,
- )
- except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, msg='Timeout while waiting on table creation')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed while waiting on table creation')
+ wait_table_exists(
+ module,
+ module.params.get("wait_timeout"),
+ module.params.get("name"),
+ )
def wait_not_exists():
- table_name = module.params.get('name')
- wait_timeout = module.params.get('wait_timeout')
+ wait_table_not_exists(
+ module,
+ module.params.get("wait_timeout"),
+ module.params.get("name"),
+ )
- delay = min(wait_timeout, 5)
- max_attempts = wait_timeout // delay
- try:
- waiter = client.get_waiter('table_not_exists')
- waiter.wait(
- WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts},
- TableName=table_name,
- )
- except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, msg='Timeout while waiting on table deletion')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed while waiting on table deletion')
+def wait_indexes():
+ wait_indexes_active(
+ module,
+ module.params.get("wait_timeout"),
+ module.params.get("name"),
+ )
def _short_type_to_long(short_key):
@@ -361,21 +355,21 @@ def _decode_primary_index(current_table):
# The schema/attribute definitions are a list of dicts which need the same
# treatment as boto3's tag lists
schema = boto3_tag_list_to_ansible_dict(
- current_table.get('key_schema', []),
+ current_table.get("key_schema", []),
# Map from 'HASH'/'RANGE' to attribute name
- tag_name_key_name='key_type',
- tag_value_key_name='attribute_name',
+ tag_name_key_name="key_type",
+ tag_value_key_name="attribute_name",
)
attributes = boto3_tag_list_to_ansible_dict(
- current_table.get('attribute_definitions', []),
+ current_table.get("attribute_definitions", []),
# Map from attribute name to 'S'/'N'/'B'.
- tag_name_key_name='attribute_name',
- tag_value_key_name='attribute_type',
+ tag_name_key_name="attribute_name",
+ tag_value_key_name="attribute_type",
)
- hash_key_name = schema.get('HASH')
+ hash_key_name = schema.get("HASH")
hash_key_type = _short_type_to_long(attributes.get(hash_key_name, None))
- range_key_name = schema.get('RANGE', None)
+ range_key_name = schema.get("RANGE", None)
range_key_type = _short_type_to_long(attributes.get(range_key_name, None))
return dict(
@@ -386,56 +380,56 @@ def _decode_primary_index(current_table):
)
-def _decode_index(index_data, attributes, type_prefix=''):
+def _decode_index(index_data, attributes, type_prefix=""):
try:
index_map = dict(
- name=index_data['index_name'],
+ name=index_data["index_name"],
)
index_data = dict(index_data)
- index_data['attribute_definitions'] = attributes
+ index_data["attribute_definitions"] = attributes
index_map.update(_decode_primary_index(index_data))
- throughput = index_data.get('provisioned_throughput', {})
- index_map['provisioned_throughput'] = throughput
+ throughput = index_data.get("provisioned_throughput", {})
+ index_map["provisioned_throughput"] = throughput
if throughput:
- index_map['read_capacity'] = throughput.get('read_capacity_units')
- index_map['write_capacity'] = throughput.get('write_capacity_units')
+ index_map["read_capacity"] = throughput.get("read_capacity_units")
+ index_map["write_capacity"] = throughput.get("write_capacity_units")
- projection = index_data.get('projection', {})
+ projection = index_data.get("projection", {})
if projection:
- index_map['type'] = type_prefix + projection.get('projection_type')
- index_map['includes'] = projection.get('non_key_attributes', [])
+ index_map["type"] = type_prefix + projection.get("projection_type")
+ index_map["includes"] = projection.get("non_key_attributes", [])
return index_map
except Exception as e:
- module.fail_json_aws(e, msg='Decode failure', index_data=index_data)
+ module.fail_json_aws(e, msg="Decode failure", index_data=index_data)
def compatability_results(current_table):
if not current_table:
return dict()
- billing_mode = current_table.get('billing_mode')
+ billing_mode = current_table.get("billing_mode")
primary_indexes = _decode_primary_index(current_table)
- hash_key_name = primary_indexes.get('hash_key_name')
- hash_key_type = primary_indexes.get('hash_key_type')
- range_key_name = primary_indexes.get('range_key_name')
- range_key_type = primary_indexes.get('range_key_type')
+ hash_key_name = primary_indexes.get("hash_key_name")
+ hash_key_type = primary_indexes.get("hash_key_type")
+ range_key_name = primary_indexes.get("range_key_name")
+ range_key_type = primary_indexes.get("range_key_type")
indexes = list()
- global_indexes = current_table.get('_global_index_map', {})
- local_indexes = current_table.get('_local_index_map', {})
+ global_indexes = current_table.get("_global_index_map", {})
+ local_indexes = current_table.get("_local_index_map", {})
for index in global_indexes:
idx = dict(global_indexes[index])
- idx.pop('provisioned_throughput', None)
+ idx.pop("provisioned_throughput", None)
indexes.append(idx)
for index in local_indexes:
idx = dict(local_indexes[index])
- idx.pop('provisioned_throughput', None)
+ idx.pop("provisioned_throughput", None)
indexes.append(idx)
compat_results = dict(
@@ -446,72 +440,78 @@ def compatability_results(current_table):
indexes=indexes,
billing_mode=billing_mode,
region=module.region,
- table_name=current_table.get('table_name', None),
- table_class=current_table.get('table_class_summary', {}).get('table_class', None),
- table_status=current_table.get('table_status', None),
- tags=current_table.get('tags', {}),
+ table_name=current_table.get("table_name", None),
+ table_class=current_table.get("table_class_summary", {}).get("table_class", None),
+ table_status=current_table.get("table_status", None),
+ tags=current_table.get("tags", {}),
)
if billing_mode == "PROVISIONED":
- throughput = current_table.get('provisioned_throughput', {})
- compat_results['read_capacity'] = throughput.get('read_capacity_units', None)
- compat_results['write_capacity'] = throughput.get('write_capacity_units', None)
+ throughput = current_table.get("provisioned_throughput", {})
+ compat_results["read_capacity"] = throughput.get("read_capacity_units", None)
+ compat_results["write_capacity"] = throughput.get("write_capacity_units", None)
return compat_results
def get_dynamodb_table():
- table_name = module.params.get('name')
+ table_name = module.params.get("name")
try:
table = _describe_table(TableName=table_name)
- except is_boto3_error_code('ResourceNotFoundException'):
+ except is_boto3_error_code("ResourceNotFoundException"):
return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed to describe table')
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to describe table")
- table = table['Table']
+ table = table["Table"]
try:
- tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table['TableArn'])['Tags']
- except is_boto3_error_code('AccessDeniedException'):
- module.warn('Permission denied when listing tags')
+ tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table["TableArn"])["Tags"]
+ except is_boto3_error_code("AccessDeniedException"):
+ module.warn("Permission denied when listing tags")
tags = []
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed to list table tags')
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to list table tags")
tags = boto3_tag_list_to_ansible_dict(tags)
table = camel_dict_to_snake_dict(table)
# Put some of the values into places people will expect them
- table['arn'] = table['table_arn']
- table['name'] = table['table_name']
- table['status'] = table['table_status']
- table['id'] = table['table_id']
- table['size'] = table['table_size_bytes']
- table['tags'] = tags
+ table["arn"] = table["table_arn"]
+ table["name"] = table["table_name"]
+ table["status"] = table["table_status"]
+ table["id"] = table["table_id"]
+ table["size"] = table["table_size_bytes"]
+ table["tags"] = tags
- if 'table_class_summary' in table:
- table['table_class'] = table['table_class_summary']['table_class']
+ if "table_class_summary" in table:
+ table["table_class"] = table["table_class_summary"]["table_class"]
# billing_mode_summary doesn't always seem to be set but is always set for PAY_PER_REQUEST
# and when updating the billing_mode
- if 'billing_mode_summary' in table:
- table['billing_mode'] = table['billing_mode_summary']['billing_mode']
+ if "billing_mode_summary" in table:
+ table["billing_mode"] = table["billing_mode_summary"]["billing_mode"]
else:
- table['billing_mode'] = "PROVISIONED"
+ table["billing_mode"] = "PROVISIONED"
# convert indexes into something we can easily search against
- attributes = table['attribute_definitions']
+ attributes = table["attribute_definitions"]
global_index_map = dict()
local_index_map = dict()
- for index in table.get('global_secondary_indexes', []):
- idx = _decode_index(index, attributes, type_prefix='global_')
- global_index_map[idx['name']] = idx
- for index in table.get('local_secondary_indexes', []):
+ for index in table.get("global_secondary_indexes", []):
+ idx = _decode_index(index, attributes, type_prefix="global_")
+ global_index_map[idx["name"]] = idx
+ for index in table.get("local_secondary_indexes", []):
idx = _decode_index(index, attributes)
- local_index_map[idx['name']] = idx
- table['_global_index_map'] = global_index_map
- table['_local_index_map'] = local_index_map
+ local_index_map[idx["name"]] = idx
+ table["_global_index_map"] = global_index_map
+ table["_local_index_map"] = local_index_map
return table
@@ -522,19 +522,19 @@ def _generate_attribute_map():
"""
attributes = dict()
- for index in (module.params, *module.params.get('indexes')):
+ for index in (module.params, *module.params.get("indexes")):
# run through hash_key_name and range_key_name
- for t in ['hash', 'range']:
- key_name = index.get(t + '_key_name')
+ for t in ["hash", "range"]:
+ key_name = index.get(t + "_key_name")
if not key_name:
continue
- key_type = index.get(t + '_key_type') or DYNAMO_TYPE_DEFAULT
+ key_type = index.get(t + "_key_type") or DYNAMO_TYPE_DEFAULT
_type = _long_type_to_short(key_type)
if key_name in attributes:
if _type != attributes[key_name]:
- module.fail_json(msg='Conflicting attribute type',
- type_1=_type, type_2=attributes[key_name],
- key_name=key_name)
+ module.fail_json(
+ msg="Conflicting attribute type", type_1=_type, type_2=attributes[key_name], key_name=key_name
+ )
else:
attributes[key_name] = _type
@@ -547,9 +547,7 @@ def _generate_attributes():
# Use ansible_dict_to_boto3_tag_list to generate the list of dicts
# format we need
attrs = ansible_dict_to_boto3_tag_list(
- attributes,
- tag_name_key_name='AttributeName',
- tag_value_key_name='AttributeType'
+ attributes, tag_name_key_name="AttributeName", tag_value_key_name="AttributeType"
)
return list(attrs)
@@ -558,8 +556,8 @@ def _generate_throughput(params=None):
if not params:
params = module.params
- read_capacity = params.get('read_capacity') or 1
- write_capacity = params.get('write_capacity') or 1
+ read_capacity = params.get("read_capacity") or 1
+ write_capacity = params.get("write_capacity") or 1
throughput = dict(
ReadCapacityUnits=read_capacity,
WriteCapacityUnits=write_capacity,
@@ -573,56 +571,54 @@ def _generate_schema(params=None):
params = module.params
schema = list()
- hash_key_name = params.get('hash_key_name')
- range_key_name = params.get('range_key_name')
+ hash_key_name = params.get("hash_key_name")
+ range_key_name = params.get("range_key_name")
if hash_key_name:
- entry = _schema_dict(hash_key_name, 'HASH')
+ entry = _schema_dict(hash_key_name, "HASH")
schema.append(entry)
if range_key_name:
- entry = _schema_dict(range_key_name, 'RANGE')
+ entry = _schema_dict(range_key_name, "RANGE")
schema.append(entry)
return schema
def _primary_index_changes(current_table):
-
primary_index = _decode_primary_index(current_table)
- hash_key_name = primary_index.get('hash_key_name')
- _hash_key_name = module.params.get('hash_key_name')
- hash_key_type = primary_index.get('hash_key_type')
- _hash_key_type = module.params.get('hash_key_type')
- range_key_name = primary_index.get('range_key_name')
- _range_key_name = module.params.get('range_key_name')
- range_key_type = primary_index.get('range_key_type')
- _range_key_type = module.params.get('range_key_type')
+ hash_key_name = primary_index.get("hash_key_name")
+ _hash_key_name = module.params.get("hash_key_name")
+ hash_key_type = primary_index.get("hash_key_type")
+ _hash_key_type = module.params.get("hash_key_type")
+ range_key_name = primary_index.get("range_key_name")
+ _range_key_name = module.params.get("range_key_name")
+ range_key_type = primary_index.get("range_key_type")
+ _range_key_type = module.params.get("range_key_type")
changed = list()
if _hash_key_name and (_hash_key_name != hash_key_name):
- changed.append('hash_key_name')
+ changed.append("hash_key_name")
if _hash_key_type and (_hash_key_type != hash_key_type):
- changed.append('hash_key_type')
+ changed.append("hash_key_type")
if _range_key_name and (_range_key_name != range_key_name):
- changed.append('range_key_name')
+ changed.append("range_key_name")
if _range_key_type and (_range_key_type != range_key_type):
- changed.append('range_key_type')
+ changed.append("range_key_type")
return changed
def _throughput_changes(current_table, params=None):
-
if not params:
params = module.params
- throughput = current_table.get('provisioned_throughput', {})
- read_capacity = throughput.get('read_capacity_units', None)
- _read_capacity = params.get('read_capacity') or read_capacity
- write_capacity = throughput.get('write_capacity_units', None)
- _write_capacity = params.get('write_capacity') or write_capacity
+ throughput = current_table.get("provisioned_throughput", {})
+ read_capacity = throughput.get("read_capacity_units", None)
+ _read_capacity = params.get("read_capacity") or read_capacity
+ write_capacity = throughput.get("write_capacity_units", None)
+ _write_capacity = params.get("write_capacity") or write_capacity
if (read_capacity != _read_capacity) or (write_capacity != _write_capacity):
return dict(
@@ -642,14 +638,14 @@ def _generate_global_indexes(billing_mode):
if billing_mode == "PAY_PER_REQUEST":
include_throughput = False
- for index in module.params.get('indexes'):
- if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']:
+ for index in module.params.get("indexes"):
+ if index.get("type") not in ["global_all", "global_include", "global_keys_only"]:
continue
- name = index.get('name')
+ name = index.get("name")
if name in index_exists:
- module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name))
+ module.fail_json(msg=f"Duplicate key {name} in list of global indexes")
# Convert the type name to upper case and remove the global_
- index['type'] = index['type'].upper()[7:]
+ index["type"] = index["type"].upper()[7:]
index = _generate_index(index, include_throughput)
index_exists[name] = True
indexes.append(index)
@@ -661,14 +657,13 @@ def _generate_local_indexes():
index_exists = dict()
indexes = list()
- for index in module.params.get('indexes'):
- index = dict()
- if index.get('type') not in ['all', 'include', 'keys_only']:
+ for index in module.params.get("indexes"):
+ if index.get("type") not in ["all", "include", "keys_only"]:
continue
- name = index.get('name')
+ name = index.get("name")
if name in index_exists:
- module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name))
- index['type'] = index['type'].upper()
+ module.fail_json(msg=f"Duplicate key {name} in list of local indexes")
+ index["type"] = index["type"].upper()
index = _generate_index(index, False)
index_exists[name] = True
indexes.append(index)
@@ -678,32 +673,32 @@ def _generate_local_indexes():
def _generate_global_index_map(current_table):
global_index_map = dict()
- existing_indexes = current_table['_global_index_map']
- for index in module.params.get('indexes'):
- if index.get('type') not in ['global_all', 'global_include', 'global_keys_only']:
+ existing_indexes = current_table["_global_index_map"]
+ for index in module.params.get("indexes"):
+ if index.get("type") not in ["global_all", "global_include", "global_keys_only"]:
continue
- name = index.get('name')
+ name = index.get("name")
if name in global_index_map:
- module.fail_json(msg='Duplicate key {0} in list of global indexes'.format(name))
+ module.fail_json(msg=f"Duplicate key {name} in list of global indexes")
idx = _merge_index_params(index, existing_indexes.get(name, {}))
# Convert the type name to upper case and remove the global_
- idx['type'] = idx['type'].upper()[7:]
+ idx["type"] = idx["type"].upper()[7:]
global_index_map[name] = idx
return global_index_map
def _generate_local_index_map(current_table):
local_index_map = dict()
- existing_indexes = current_table['_local_index_map']
- for index in module.params.get('indexes'):
- if index.get('type') not in ['all', 'include', 'keys_only']:
+ existing_indexes = current_table["_local_index_map"]
+ for index in module.params.get("indexes"):
+ if index.get("type") not in ["all", "include", "keys_only"]:
continue
- name = index.get('name')
+ name = index.get("name")
if name in local_index_map:
- module.fail_json(msg='Duplicate key {0} in list of local indexes'.format(name))
+ module.fail_json(msg=f"Duplicate key {name} in list of local indexes")
idx = _merge_index_params(index, existing_indexes.get(name, {}))
# Convert the type name to upper case
- idx['type'] = idx['type'].upper()
+ idx["type"] = idx["type"].upper()
local_index_map[name] = idx
return local_index_map
@@ -711,27 +706,28 @@ def _generate_local_index_map(current_table):
def _generate_index(index, include_throughput=True):
key_schema = _generate_schema(index)
throughput = _generate_throughput(index)
- non_key_attributes = index['includes'] or []
+ non_key_attributes = index["includes"] or []
projection = dict(
- ProjectionType=index['type'],
+ ProjectionType=index["type"],
)
- if index['type'] != 'ALL':
+ if index["type"] != "ALL":
if non_key_attributes:
- projection['NonKeyAttributes'] = non_key_attributes
+ projection["NonKeyAttributes"] = non_key_attributes
else:
if non_key_attributes:
module.fail_json(
- "DynamoDB does not support specifying non-key-attributes ('includes') for "
- "indexes of type 'all'. Index name: {0}".format(index['name']))
+ "DynamoDB does not support specifying non-key-attributes ('includes') for indexes of type 'all'. Index"
+ f" name: {index['name']}"
+ )
idx = dict(
- IndexName=index['name'],
+ IndexName=index["name"],
KeySchema=key_schema,
Projection=projection,
)
if include_throughput:
- idx['ProvisionedThroughput'] = throughput
+ idx["ProvisionedThroughput"] = throughput
return idx
@@ -742,15 +738,15 @@ def _attribute_changes(current_table):
def _global_index_changes(current_table):
- current_global_index_map = current_table['_global_index_map']
+ current_global_index_map = current_table["_global_index_map"]
global_index_map = _generate_global_index_map(current_table)
- current_billing_mode = current_table.get('billing_mode')
+ current_billing_mode = current_table.get("billing_mode")
- if module.params.get('billing_mode') is None:
+ if module.params.get("billing_mode") is None:
billing_mode = current_billing_mode
else:
- billing_mode = module.params.get('billing_mode')
+ billing_mode = module.params.get("billing_mode")
include_throughput = True
@@ -761,7 +757,6 @@ def _global_index_changes(current_table):
# TODO (future) it would be nice to add support for deleting an index
for name in global_index_map:
-
idx = dict(_generate_index(global_index_map[name], include_throughput=include_throughput))
if name not in current_global_index_map:
index_changes.append(dict(Create=idx))
@@ -798,37 +793,37 @@ def _update_table(current_table):
# Get throughput / billing_mode changes
throughput_changes = _throughput_changes(current_table)
if throughput_changes:
- changes['ProvisionedThroughput'] = throughput_changes
+ changes["ProvisionedThroughput"] = throughput_changes
- current_billing_mode = current_table.get('billing_mode')
- new_billing_mode = module.params.get('billing_mode')
+ current_billing_mode = current_table.get("billing_mode")
+ new_billing_mode = module.params.get("billing_mode")
if new_billing_mode is None:
new_billing_mode = current_billing_mode
if current_billing_mode != new_billing_mode:
- changes['BillingMode'] = new_billing_mode
+ changes["BillingMode"] = new_billing_mode
# Update table_class use exisiting if none is defined
- if module.params.get('table_class'):
- if module.params.get('table_class') != current_table.get('table_class'):
- changes['TableClass'] = module.params.get('table_class')
+ if module.params.get("table_class"):
+ if module.params.get("table_class") != current_table.get("table_class"):
+ changes["TableClass"] = module.params.get("table_class")
global_index_changes = _global_index_changes(current_table)
if global_index_changes:
- changes['GlobalSecondaryIndexUpdates'] = global_index_changes
+ changes["GlobalSecondaryIndexUpdates"] = global_index_changes
# Only one index can be changed at a time except if changing the billing mode, pass the first during the
# main update and deal with the others on a slow retry to wait for
# completion
if current_billing_mode == new_billing_mode:
if len(global_index_changes) > 1:
- changes['GlobalSecondaryIndexUpdates'] = [global_index_changes[0]]
+ changes["GlobalSecondaryIndexUpdates"] = [global_index_changes[0]]
additional_global_index_changes = global_index_changes[1:]
local_index_changes = _local_index_changes(current_table)
if local_index_changes:
- changes['LocalSecondaryIndexUpdates'] = local_index_changes
+ changes["LocalSecondaryIndexUpdates"] = local_index_changes
if not changes:
return False
@@ -837,38 +832,39 @@ def _update_table(current_table):
return True
if global_index_changes or local_index_changes:
- changes['AttributeDefinitions'] = _generate_attributes()
+ changes["AttributeDefinitions"] = _generate_attributes()
try:
- client.update_table(
- aws_retry=True,
- TableName=module.params.get('name'),
- **changes
- )
+ client.update_table(aws_retry=True, TableName=module.params.get("name"), **changes)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to update table")
if additional_global_index_changes:
for index in additional_global_index_changes:
+ wait_indexes()
try:
- _update_table_with_long_retry(GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes['AttributeDefinitions'])
+ _update_table_with_long_retry(
+ GlobalSecondaryIndexUpdates=[index], AttributeDefinitions=changes["AttributeDefinitions"]
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to update table", changes=changes,
- additional_global_index_changes=additional_global_index_changes)
-
- if module.params.get('wait'):
- wait_exists()
+ module.fail_json_aws(
+ e,
+ msg="Failed to update table",
+ changes=changes,
+ additional_global_index_changes=additional_global_index_changes,
+ )
return True
def _update_tags(current_table):
- _tags = module.params.get('tags')
+ _tags = module.params.get("tags")
if _tags is None:
return False
- tags_to_add, tags_to_remove = compare_aws_tags(current_table['tags'], module.params.get('tags'),
- purge_tags=module.params.get('purge_tags'))
+ tags_to_add, tags_to_remove = compare_aws_tags(
+ current_table["tags"], module.params.get("tags"), purge_tags=module.params.get("purge_tags")
+ )
# If neither need updating we can return already
if not (tags_to_add or tags_to_remove):
@@ -881,7 +877,7 @@ def _update_tags(current_table):
try:
client.tag_resource(
aws_retry=True,
- ResourceArn=current_table['arn'],
+ ResourceArn=current_table["arn"],
Tags=ansible_dict_to_boto3_tag_list(tags_to_add),
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
@@ -890,7 +886,7 @@ def _update_tags(current_table):
try:
client.untag_resource(
aws_retry=True,
- ResourceArn=current_table['arn'],
+ ResourceArn=current_table["arn"],
TagKeys=tags_to_remove,
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
@@ -902,28 +898,31 @@ def _update_tags(current_table):
def update_table(current_table):
primary_index_changes = _primary_index_changes(current_table)
if primary_index_changes:
- module.fail_json("DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {0}".format(primary_index_changes))
+ module.fail_json(
+ f"DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {primary_index_changes}"
+ )
changed = False
changed |= _update_table(current_table)
changed |= _update_tags(current_table)
- if module.params.get('wait'):
+ if module.params.get("wait"):
wait_exists()
+ wait_indexes()
return changed
def create_table():
- table_name = module.params.get('name')
- table_class = module.params.get('table_class')
- hash_key_name = module.params.get('hash_key_name')
- billing_mode = module.params.get('billing_mode')
+ table_name = module.params.get("name")
+ table_class = module.params.get("table_class")
+ hash_key_name = module.params.get("hash_key_name")
+ billing_mode = module.params.get("billing_mode")
if billing_mode is None:
billing_mode = "PROVISIONED"
- tags = ansible_dict_to_boto3_tag_list(module.params.get('tags') or {})
+ tags = ansible_dict_to_boto3_tag_list(module.params.get("tags") or {})
if not hash_key_name:
module.fail_json('"hash_key_name" must be provided when creating a new table.')
@@ -951,21 +950,22 @@ def create_table():
)
if table_class:
- params['TableClass'] = table_class
+ params["TableClass"] = table_class
if billing_mode == "PROVISIONED":
- params['ProvisionedThroughput'] = throughput
+ params["ProvisionedThroughput"] = throughput
if local_indexes:
- params['LocalSecondaryIndexes'] = local_indexes
+ params["LocalSecondaryIndexes"] = local_indexes
if global_indexes:
- params['GlobalSecondaryIndexes'] = global_indexes
+ params["GlobalSecondaryIndexes"] = global_indexes
try:
client.create_table(aws_retry=True, **params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to create table')
+ module.fail_json_aws(e, msg="Failed to create table")
- if module.params.get('wait'):
+ if module.params.get("wait"):
wait_exists()
+ wait_indexes()
return True
@@ -977,30 +977,34 @@ def delete_table(current_table):
if module.check_mode:
return True
- table_name = module.params.get('name')
+ table_name = module.params.get("name")
# If an index is mid-update then we have to wait for the update to complete
# before deletion will succeed
long_retry = AWSRetry.jittered_backoff(
- retries=45, delay=5, max_delay=30,
- catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException'],
+ retries=45,
+ delay=5,
+ max_delay=30,
+ catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"],
)
try:
long_retry(client.delete_table)(TableName=table_name)
- except is_boto3_error_code('ResourceNotFoundException'):
+ except is_boto3_error_code("ResourceNotFoundException"):
return False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed to delete table')
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to delete table")
- if module.params.get('wait'):
+ if module.params.get("wait"):
wait_not_exists()
return True
def main():
-
global module
global client
@@ -1008,36 +1012,36 @@ def main():
# different parameters, use a separate namespace for names,
# and local indexes can't be updated.
index_options = dict(
- name=dict(type='str', required=True),
+ name=dict(type="str", required=True),
# It would be nice to make this optional, but because Local and Global
# indexes are mixed in here we need this to be able to tell to which
# group of indexes the index belongs.
- type=dict(type='str', required=True, choices=INDEX_TYPE_OPTIONS),
- hash_key_name=dict(type='str', required=False),
- hash_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES),
- range_key_name=dict(type='str', required=False),
- range_key_type=dict(type='str', required=False, choices=KEY_TYPE_CHOICES),
- includes=dict(type='list', required=False, elements='str'),
- read_capacity=dict(type='int', required=False),
- write_capacity=dict(type='int', required=False),
+ type=dict(type="str", required=True, choices=INDEX_TYPE_OPTIONS),
+ hash_key_name=dict(type="str", required=False),
+ hash_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES),
+ range_key_name=dict(type="str", required=False),
+ range_key_type=dict(type="str", required=False, choices=KEY_TYPE_CHOICES),
+ includes=dict(type="list", required=False, elements="str"),
+ read_capacity=dict(type="int", required=False),
+ write_capacity=dict(type="int", required=False),
)
argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
- name=dict(required=True, type='str'),
- hash_key_name=dict(type='str'),
- hash_key_type=dict(type='str', choices=KEY_TYPE_CHOICES),
- range_key_name=dict(type='str'),
- range_key_type=dict(type='str', choices=KEY_TYPE_CHOICES),
- billing_mode=dict(type='str', choices=['PROVISIONED', 'PAY_PER_REQUEST']),
- read_capacity=dict(type='int'),
- write_capacity=dict(type='int'),
- indexes=dict(default=[], type='list', elements='dict', options=index_options),
- table_class=dict(type='str', choices=['STANDARD', 'STANDARD_INFREQUENT_ACCESS']),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(default=300, type='int', aliases=['wait_for_active_timeout']),
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(required=True, type="str"),
+ hash_key_name=dict(type="str"),
+ hash_key_type=dict(type="str", choices=KEY_TYPE_CHOICES),
+ range_key_name=dict(type="str"),
+ range_key_type=dict(type="str", choices=KEY_TYPE_CHOICES),
+ billing_mode=dict(type="str", choices=["PROVISIONED", "PAY_PER_REQUEST"]),
+ read_capacity=dict(type="int"),
+ write_capacity=dict(type="int"),
+ indexes=dict(default=[], type="list", elements="dict", options=index_options),
+ table_class=dict(type="str", choices=["STANDARD", "STANDARD_INFREQUENT_ACCESS"]),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ wait=dict(type="bool", default=True),
+ wait_timeout=dict(default=900, type="int", aliases=["wait_for_active_timeout"]),
)
module = AnsibleAWSModule(
@@ -1047,41 +1051,38 @@ def main():
)
retry_decorator = AWSRetry.jittered_backoff(
- catch_extra_error_codes=['LimitExceededException', 'ResourceInUseException', 'ResourceNotFoundException'],
+ catch_extra_error_codes=["LimitExceededException", "ResourceInUseException", "ResourceNotFoundException"],
)
- client = module.client('dynamodb', retry_decorator=retry_decorator)
-
- if module.params.get('table_class'):
- module.require_botocore_at_least('1.23.18', reason='to set table_class')
+ client = module.client("dynamodb", retry_decorator=retry_decorator)
current_table = get_dynamodb_table()
changed = False
table = None
results = dict()
- state = module.params.get('state')
- if state == 'present':
+ state = module.params.get("state")
+ if state == "present":
if current_table:
changed |= update_table(current_table)
else:
changed |= create_table()
table = get_dynamodb_table()
- elif state == 'absent':
+ elif state == "absent":
changed |= delete_table(current_table)
compat_results = compatability_results(table)
if compat_results:
results.update(compat_results)
- results['changed'] = changed
+ results["changed"] = changed
if table:
# These are used to pass computed data about, not needed for users
- table.pop('_global_index_map', None)
- table.pop('_local_index_map', None)
- results['table'] = table
+ table.pop("_global_index_map", None)
+ table.pop("_local_index_map", None)
+ results["table"] = table
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py b/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py
index 9cbbb3e5e..eca236cf4 100644
--- a/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py
+++ b/ansible_collections/community/aws/plugins/modules/dynamodb_ttl.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: dynamodb_ttl
version_added: 1.0.0
@@ -32,14 +30,15 @@ options:
required: true
type: str
-author: Ted Timmons (@tedder)
+author:
+- Ted Timmons (@tedder)
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
+- amazon.aws.common.modules
+- amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: enable TTL on my cowfacts table
community.aws.dynamodb_ttl:
state: enable
@@ -51,9 +50,9 @@ EXAMPLES = '''
state: disable
table_name: cowfacts
attribute_name: cow_deleted_date
-'''
+"""
-RETURN = '''
+RETURN = r"""
current_status:
description: current or new TTL specification.
type: dict
@@ -61,59 +60,59 @@ current_status:
sample:
- { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" }
- { "AttributeName": "deploy_timestamp", "Enabled": true }
-'''
+"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_current_ttl_state(c, table_name):
- '''Fetch the state dict for a table.'''
+ """Fetch the state dict for a table."""
current_state = c.describe_time_to_live(TableName=table_name)
- return current_state.get('TimeToLiveDescription')
+ return current_state.get("TimeToLiveDescription")
def does_state_need_changing(attribute_name, desired_state, current_spec):
- '''Run checks to see if the table needs to be modified. Basically a dirty check.'''
+ """Run checks to see if the table needs to be modified. Basically a dirty check."""
if not current_spec:
# we don't have an entry (or a table?)
return True
- if desired_state.lower() == 'enable' and current_spec.get('TimeToLiveStatus') not in ['ENABLING', 'ENABLED']:
+ if desired_state.lower() == "enable" and current_spec.get("TimeToLiveStatus") not in ["ENABLING", "ENABLED"]:
return True
- if desired_state.lower() == 'disable' and current_spec.get('TimeToLiveStatus') not in ['DISABLING', 'DISABLED']:
+ if desired_state.lower() == "disable" and current_spec.get("TimeToLiveStatus") not in ["DISABLING", "DISABLED"]:
return True
- if attribute_name != current_spec.get('AttributeName'):
+ if attribute_name != current_spec.get("AttributeName"):
return True
return False
def set_ttl_state(c, table_name, state, attribute_name):
- '''Set our specification. Returns the update_time_to_live specification dict,
- which is different than the describe_* call.'''
+ """Set our specification. Returns the update_time_to_live specification dict,
+ which is different than the describe_* call."""
is_enabled = False
- if state.lower() == 'enable':
+ if state.lower() == "enable":
is_enabled = True
ret = c.update_time_to_live(
TableName=table_name,
TimeToLiveSpecification={
- 'Enabled': is_enabled,
- 'AttributeName': attribute_name
- }
+ "Enabled": is_enabled,
+ "AttributeName": attribute_name,
+ },
)
- return ret.get('TimeToLiveSpecification')
+ return ret.get("TimeToLiveSpecification")
def main():
argument_spec = dict(
- state=dict(choices=['enable', 'disable']),
+ state=dict(choices=["enable", "disable"]),
table_name=dict(required=True),
attribute_name=dict(required=True),
)
@@ -122,26 +121,28 @@ def main():
)
try:
- dbclient = module.client('dynamodb')
+ dbclient = module.client("dynamodb")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- result = {'changed': False}
- state = module.params['state']
+ result = {"changed": False}
+ state = module.params["state"]
# wrap all our calls to catch the standard exceptions. We don't pass `module` in to the
# methods so it's easier to do here.
try:
- current_state = get_current_ttl_state(dbclient, module.params['table_name'])
+ current_state = get_current_ttl_state(dbclient, module.params["table_name"])
- if does_state_need_changing(module.params['attribute_name'], module.params['state'], current_state):
+ if does_state_need_changing(module.params["attribute_name"], module.params["state"], current_state):
# changes needed
- new_state = set_ttl_state(dbclient, module.params['table_name'], module.params['state'], module.params['attribute_name'])
- result['current_status'] = new_state
- result['changed'] = True
+ new_state = set_ttl_state(
+ dbclient, module.params["table_name"], module.params["state"], module.params["attribute_name"]
+ )
+ result["current_status"] = new_state
+ result["changed"] = True
else:
# no changes needed
- result['current_status'] = current_state
+ result["current_status"] = current_state
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e, msg="Failed to get or update ttl state")
@@ -153,5 +154,5 @@ def main():
module.exit_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py b/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
index 15a69163d..bb5a30ea1 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ec2_ami_copy
version_added: 1.0.0
@@ -72,12 +69,12 @@ author:
- Amir Moulavi (@amir343) <amir.moulavi@gmail.com>
- Tim C (@defunctio) <defunct@defunct.io>
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Basic AMI Copy
community.aws.ec2_ami_copy:
source_region: us-east-1
@@ -107,8 +104,8 @@ EXAMPLES = '''
region: eu-west-1
source_image_id: ami-xxxxxxx
tags:
- Name: My-Super-AMI
- Patch: 1.2.3
+ Name: My-Super-AMI
+ Patch: 1.2.3
tag_equality: true
- name: Encrypted AMI copy
@@ -125,26 +122,29 @@ EXAMPLES = '''
source_image_id: ami-xxxxxxx
encrypted: true
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
-'''
+"""
-RETURN = '''
+RETURN = r"""
image_id:
description: AMI ID of the copied AMI
returned: always
type: str
sample: ami-e689729e
-'''
+"""
try:
- from botocore.exceptions import ClientError, WaiterError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import WaiterError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils._text import to_native
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def copy_image(module, ec2):
@@ -157,67 +157,67 @@ def copy_image(module, ec2):
image = None
changed = False
- tags = module.params.get('tags')
-
- params = {'SourceRegion': module.params.get('source_region'),
- 'SourceImageId': module.params.get('source_image_id'),
- 'Name': module.params.get('name'),
- 'Description': module.params.get('description'),
- 'Encrypted': module.params.get('encrypted'),
- }
- if module.params.get('kms_key_id'):
- params['KmsKeyId'] = module.params.get('kms_key_id')
+ tags = module.params.get("tags")
+
+ params = {
+ "SourceRegion": module.params.get("source_region"),
+ "SourceImageId": module.params.get("source_image_id"),
+ "Name": module.params.get("name"),
+ "Description": module.params.get("description"),
+ "Encrypted": module.params.get("encrypted"),
+ }
+ if module.params.get("kms_key_id"):
+ params["KmsKeyId"] = module.params.get("kms_key_id")
try:
- if module.params.get('tag_equality'):
- filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()]
- filters.append(dict(Name='state', Values=['available', 'pending']))
+ if module.params.get("tag_equality"):
+ filters = [{"Name": f"tag:{k}", "Values": [v]} for (k, v) in module.params.get("tags").items()]
+ filters.append(dict(Name="state", Values=["available", "pending"]))
images = ec2.describe_images(Filters=filters)
- if len(images['Images']) > 0:
- image = images['Images'][0]
+ if len(images["Images"]) > 0:
+ image = images["Images"][0]
if not image:
image = ec2.copy_image(**params)
- image_id = image['ImageId']
+ image_id = image["ImageId"]
if tags:
- ec2.create_tags(Resources=[image_id],
- Tags=ansible_dict_to_boto3_tag_list(tags))
+ ec2.create_tags(Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags))
changed = True
- if module.params.get('wait'):
+ if module.params.get("wait"):
delay = 15
- max_attempts = module.params.get('wait_timeout') // delay
- image_id = image.get('ImageId')
- ec2.get_waiter('image_available').wait(
- ImageIds=[image_id],
- WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
+ max_attempts = module.params.get("wait_timeout") // delay
+ image_id = image.get("ImageId")
+ ec2.get_waiter("image_available").wait(
+ ImageIds=[image_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}
)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(image))
except WaiterError as e:
- module.fail_json_aws(e, msg='An error occurred waiting for the image to become available')
+ module.fail_json_aws(e, msg="An error occurred waiting for the image to become available")
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not copy AMI")
except Exception as e:
- module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))
+ module.fail_json(msg=f"Unhandled exception. ({to_native(e)})")
def main():
argument_spec = dict(
source_region=dict(required=True),
source_image_id=dict(required=True),
- name=dict(default='default'),
- description=dict(default=''),
- encrypted=dict(type='bool', default=False, required=False),
- kms_key_id=dict(type='str', required=False),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=600),
- tags=dict(type='dict', aliases=['resource_tags']),
- tag_equality=dict(type='bool', default=False))
+ name=dict(default="default"),
+ description=dict(default=""),
+ encrypted=dict(type="bool", default=False, required=False),
+ kms_key_id=dict(type="str", required=False),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=600),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ tag_equality=dict(type="bool", default=False),
+ )
module = AnsibleAWSModule(argument_spec=argument_spec)
- ec2 = module.client('ec2')
+ ec2 = module.client("ec2")
copy_image(module, ec2)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway.py b/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway.py
new file mode 100644
index 000000000..97d62b5fc
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: ec2_carrier_gateway
+version_added: 6.0.0
+short_description: Manage an AWS VPC Carrier gateway
+description:
+ - Manage an AWS VPC Carrier gateway.
+author:
+ - "Marco Braga (@mtulio)"
+options:
+ vpc_id:
+ description:
+ - The VPC ID for the VPC in which to manage the Carrier Gateway.
+ required: true
+ type: str
+ carrier_gateway_id:
+ description:
+ - The Carrier Gateway ID to manage the Carrier Gateway.
+ required: false
+ type: str
+ state:
+ description:
+ - Create or terminate the Carrier Gateway.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Carrier Gateway.
+# The Carrier Gateway ID can be accessed via {{cagw.carrier_gateway_id}} for use in setting up Route tables etc.
+- name: Create Carrier gateway
+ community.aws.ec2_carrier_gateway:
+ vpc_id: vpc-abcdefgh
+ state: present
+ register: cagw
+
+- name: Create Carrier gateway with tags
+ community.aws.ec2_carrier_gateway:
+ vpc_id: vpc-abcdefgh
+ state: present
+ tags:
+ Tag1: tag1
+ Tag2: tag2
+ register: cagw
+
+- name: Delete Carrier gateway
+ community.aws.ec2_carrier_gateway:
+ vpc_id: vpc-abcdefgh
+ carrier_gateway_id: "cagw-123"
+ state: absent
+ register: vpc_cagw_delete
+"""
+
+RETURN = r"""
+changed:
+ description: If any changes have been made to the Carrier Gateway.
+ type: bool
+ returned: always
+ sample:
+ changed: false
+carrier_gateway_id:
+ description: The unique identifier for the Carrier Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ carrier_gateway_id: "cagw-XXXXXXXX"
+tags:
+ description: The tags associated the Carrier Gateway.
+ type: dict
+ returned: I(state=present)
+ sample:
+ tags:
+ "Ansible": "Test"
+vpc_id:
+ description: The VPC ID associated with the Carrier Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ vpc_id: "vpc-XXXXXXXX"
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10)
+def describe_cagws_with_backoff(connection, **params):
+ paginator = connection.get_paginator("describe_carrier_gateways")
+ return paginator.paginate(**params).build_full_result()["CarrierGateways"]
+
+
+class AnsibleEc2Cagw:
+ def __init__(self, module, results):
+ self._module = module
+ self._results = results
+ self._connection = self._module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
+ self._check_mode = self._module.check_mode
+
+ def process(self):
+ vpc_id = self._module.params.get("vpc_id")
+ state = self._module.params.get("state", "present")
+ tags = self._module.params.get("tags")
+ purge_tags = self._module.params.get("purge_tags")
+
+ if state == "present":
+ self.ensure_cagw_present(vpc_id, tags, purge_tags)
+ elif state == "absent":
+ self.ensure_cagw_absent(vpc_id)
+
+ def get_matching_cagw(self, vpc_id, carrier_gateway_id=None):
+ """
+ Returns the carrier gateway found.
+ Parameters:
+ vpc_id (str): VPC ID
+ carrier_gateway_id (str): Carrier Gateway ID, if specified
+ Returns:
+ cagw (dict): dict of cagw found, None if none found
+ """
+ filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id})
+ try:
+ if not carrier_gateway_id:
+ cagws = describe_cagws_with_backoff(
+ self._connection,
+ Filters=filters,
+ )
+ else:
+ cagws = describe_cagws_with_backoff(
+ self._connection,
+ CarrierGatewayIds=[carrier_gateway_id],
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ cagw = None
+ if len(cagws) > 1:
+ self._module.fail_json(msg=f"EC2 returned more than one Carrier Gateway for VPC {vpc_id}, aborting")
+ elif cagws:
+ cagw = camel_dict_to_snake_dict(cagws[0])
+
+ return cagw
+
+ @staticmethod
+ def get_cagw_info(cagw, vpc_id):
+ return {
+ "carrier_gateway_id": cagw["carrier_gateway_id"],
+ "tags": boto3_tag_list_to_ansible_dict(cagw["tags"]),
+ "vpc_id": vpc_id,
+ }
+
+ def ensure_cagw_absent(self, vpc_id):
+ cagw = self.get_matching_cagw(vpc_id)
+ if cagw is None:
+ return self._results
+
+ if self._check_mode:
+ self._results["changed"] = True
+ return self._results
+
+ try:
+ self._results["changed"] = True
+ self._connection.delete_carrier_gateway(
+ aws_retry=True,
+ CarrierGatewayId=cagw["carrier_gateway_id"],
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Unable to delete Carrier Gateway")
+
+ return self._results
+
+ def ensure_cagw_present(self, vpc_id, tags, purge_tags):
+ cagw = self.get_matching_cagw(vpc_id)
+
+ if cagw is None:
+ if self._check_mode:
+ self._results["changed"] = True
+ self._results["carrier_gateway_id"] = None
+ return self._results
+
+ try:
+ response = self._connection.create_carrier_gateway(VpcId=vpc_id, aws_retry=True)
+ cagw = camel_dict_to_snake_dict(response["CarrierGateway"])
+ self._results["changed"] = True
+ except is_boto3_error_message("You must be opted into a wavelength zone to create a carrier gateway.") as e:
+ self._module.fail_json(msg="You must be opted into a wavelength zone to create a carrier gateway")
+ except botocore.exceptions.WaiterError as e:
+ self._module.fail_json_aws(e, msg="No Carrier Gateway exists.")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Unable to create Carrier Gateway")
+
+ # Modify tags
+ self._results["changed"] |= ensure_ec2_tags(
+ self._connection,
+ self._module,
+ cagw["carrier_gateway_id"],
+ resource_type="carrier-gateway",
+ tags=tags,
+ purge_tags=purge_tags,
+ retry_codes="InvalidCarrierGatewayID.NotFound",
+ )
+
+ # Update cagw
+ cagw = self.get_matching_cagw(vpc_id, carrier_gateway_id=cagw["carrier_gateway_id"])
+ cagw_info = self.get_cagw_info(cagw, vpc_id)
+ self._results.update(cagw_info)
+
+ return self._results
+
+
+def main():
+ argument_spec = dict(
+ carrier_gateway_id=dict(required=False),
+ vpc_id=dict(required=True),
+ state=dict(default="present", choices=["present", "absent"]),
+ tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(default=True, type="bool"),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=[["vpc_id", "carrier_gateway_id"]],
+ supports_check_mode=True,
+ )
+ results = dict(
+ changed=False,
+ )
+ cagw_manager = AnsibleEc2Cagw(module=module, results=results)
+ cagw_manager.process()
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway_info.py b/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway_info.py
new file mode 100644
index 000000000..67ee30e55
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ec2_carrier_gateway_info.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: ec2_carrier_gateway_info
+version_added: 6.0.0
+short_description: Gather information about carrier gateways in AWS
+description:
+ - Gather information about carrier gateways in AWS.
+author:
+ - "Marco Braga (@mtulio)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCarrierGateways.html) for possible filters.
+ required: false
+ default: {}
+ type: dict
+ carrier_gateway_ids:
+ description:
+ - Get details of specific Carrier Gateway ID.
+ required: false
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# # Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all Carrier Gateways for an account or profile
+ community.aws.ec2_carrier_gateway_info:
+ region: ap-southeast-2
+ register: cagw_info
+
+- name: Gather information about a filtered list of Carrier Gateways
+ community.aws.ec2_carrier_gateway_info:
+ region: ap-southeast-2
+ filters:
+ "tag:Name": "cagw-123"
+ register: cagw_info
+
+- name: Gather information about a specific carrier gateway by CarrierGatewayId
+ community.aws.ec2_carrier_gateway_info:
+ region: ap-southeast-2
+ carrier_gateway_ids: cagw-c1231234
+ register: cagw_info
+"""
+
+RETURN = r"""
+changed:
+ description: True if listing the carrier gateways succeeds.
+ type: bool
+ returned: always
+ sample: "false"
+carrier_gateways:
+ description: The carrier gateways for the account.
+ returned: always
+ type: complex
+ contains:
+ vpc_id:
+ description: The ID of the VPC.
+ returned: I(state=present)
+ type: str
+ sample: vpc-02123b67
+ carrier_gateway_id:
+ description: The ID of the carrier gateway.
+ returned: always
+ type: str
+ sample: cagw-2123634d
+ tags:
+ description: Any tags assigned to the carrier gateway.
+ returned: always
+ type: dict
+ sample: {"Ansible": "Test"}
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
+
+def get_carrier_gateway_info(carrier_gateway):
+ tags = boto3_tag_list_to_ansible_dict(carrier_gateway["Tags"])
+ ignore_list = []
+ carrier_gateway_info = {
+ "CarrierGatewayId": carrier_gateway["CarrierGatewayId"],
+ "VpcId": carrier_gateway["VpcId"],
+ "Tags": tags,
+ }
+
+ carrier_gateway_info = camel_dict_to_snake_dict(carrier_gateway_info, ignore_list=ignore_list)
+ return carrier_gateway_info
+
+
+def list_carrier_gateways(connection, module):
+ params = dict()
+
+ params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+ if module.params.get("carrier_gateway_ids"):
+ params["CarrierGatewayIds"] = module.params.get("carrier_gateway_ids")
+
+ try:
+ all_carrier_gateways = connection.describe_carrier_gateways(aws_retry=True, **params)
+ except is_boto3_error_code("InvalidCarrierGatewayID.NotFound"):
+ module.fail_json(msg="CarrierGateway not found")
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Unable to describe carrier gateways")
+
+ return [get_carrier_gateway_info(cagw) for cagw in all_carrier_gateways["CarrierGateways"]]
+
+
+def main():
+ argument_spec = dict(
+ carrier_gateway_ids=dict(default=None, elements="str", type="list"),
+ filters=dict(default={}, type="dict"),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Validate Requirements
+ try:
+ connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+ results = list_carrier_gateways(connection, module)
+
+ module.exit_json(carrier_gateways=results)
+
+
+if __name__ == "__main__":
+ main()
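
The module above is a thin wrapper over a single describe call; a hedged standalone sketch of the same lookup (the region and the tag value are illustrative):

import boto3

ec2 = boto3.client("ec2", region_name="ap-southeast-2")  # illustrative region
resp = ec2.describe_carrier_gateways(
    # Same filter shape that ansible_dict_to_boto3_filter_list builds.
    Filters=[{"Name": "tag:Name", "Values": ["cagw-123"]}]
)
for cagw in resp["CarrierGateways"]:
    print(cagw["CarrierGatewayId"], cagw["VpcId"], cagw.get("Tags", []))
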
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py
index 3b176b5ee..19fc8eab7 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway.py
@@ -1,25 +1,24 @@
#!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+# -*- coding: utf-8 -*-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ec2_customer_gateway
version_added: 1.0.0
short_description: Manage an AWS customer gateway
description:
- - Manage an AWS customer gateway.
-author: Michael Baydoun (@MichaelBaydoun)
+ - Manage an AWS customer gateway.
+author:
+ - Michael Baydoun (@MichaelBaydoun)
notes:
- - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the
- first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent
- requests do not create new customer gateway resources.
- - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
- customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
+ - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the
+ first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent
+ requests do not create new customer gateway resources.
+ - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
+ customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
options:
bgp_asn:
description:
@@ -49,13 +48,12 @@ options:
choices: [ 'present', 'absent' ]
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create Customer Gateway
community.aws.ec2_customer_gateway:
bgp_asn: 12345
@@ -71,9 +69,9 @@ EXAMPLES = '''
state: absent
region: us-east-1
register: cgw
-'''
+"""
-RETURN = '''
+RETURN = r"""
gateway.customer_gateways:
description: details about the gateway that was created.
returned: success
@@ -108,7 +106,7 @@ gateway.customer_gateways:
returned: when gateway exists and is available.
sample: ipsec.1
type: str
-'''
+"""
try:
import botocore
@@ -117,26 +115,23 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-class Ec2CustomerGatewayManager:
+class Ec2CustomerGatewayManager:
def __init__(self, module):
self.module = module
try:
- self.ec2 = module.client('ec2')
+ self.ec2 = module.client("ec2")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])
+ @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=["IncorrectState"])
def ensure_cgw_absent(self, gw_id):
- response = self.ec2.delete_customer_gateway(
- DryRun=False,
- CustomerGatewayId=gw_id
- )
+ response = self.ec2.delete_customer_gateway(DryRun=False, CustomerGatewayId=gw_id)
return response
def ensure_cgw_present(self, bgp_asn, ip_address):
@@ -144,7 +139,7 @@ class Ec2CustomerGatewayManager:
bgp_asn = 65000
response = self.ec2.create_customer_gateway(
DryRun=False,
- Type='ipsec.1',
+ Type="ipsec.1",
PublicIp=ip_address,
BgpAsn=bgp_asn,
)
@@ -157,11 +152,8 @@ class Ec2CustomerGatewayManager:
gw_id,
],
Tags=[
- {
- 'Key': 'Name',
- 'Value': name
- },
- ]
+ {"Key": "Name", "Value": name},
+ ],
)
return response
@@ -170,86 +162,84 @@ class Ec2CustomerGatewayManager:
DryRun=False,
Filters=[
{
- 'Name': 'state',
- 'Values': [
- 'available',
- ]
+ "Name": "state",
+ "Values": [
+ "available",
+ ],
},
{
- 'Name': 'ip-address',
- 'Values': [
+ "Name": "ip-address",
+ "Values": [
ip_address,
- ]
- }
- ]
+ ],
+ },
+ ],
)
return response
def main():
argument_spec = dict(
- bgp_asn=dict(required=False, type='int'),
+ bgp_asn=dict(required=False, type="int"),
ip_address=dict(required=True),
name=dict(required=True),
- routing=dict(default='dynamic', choices=['dynamic', 'static']),
- state=dict(default='present', choices=['present', 'absent']),
+ routing=dict(default="dynamic", choices=["dynamic", "static"]),
+ state=dict(default="present", choices=["present", "absent"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
- ('routing', 'dynamic', ['bgp_asn'])
- ]
+ ("routing", "dynamic", ["bgp_asn"]),
+ ],
)
gw_mgr = Ec2CustomerGatewayManager(module)
- name = module.params.get('name')
+ name = module.params.get("name")
- existing = gw_mgr.describe_gateways(module.params['ip_address'])
+ existing = gw_mgr.describe_gateways(module.params["ip_address"])
results = dict(changed=False)
- if module.params['state'] == 'present':
- if existing['CustomerGateways']:
- existing['CustomerGateway'] = existing['CustomerGateways'][0]
- results['gateway'] = existing
- if existing['CustomerGateway']['Tags']:
- tag_array = existing['CustomerGateway']['Tags']
+ if module.params["state"] == "present":
+ if existing["CustomerGateways"]:
+ existing["CustomerGateway"] = existing["CustomerGateways"][0]
+ results["gateway"] = existing
+ if existing["CustomerGateway"]["Tags"]:
+ tag_array = existing["CustomerGateway"]["Tags"]
for key, value in enumerate(tag_array):
- if value['Key'] == 'Name':
- current_name = value['Value']
+ if value["Key"] == "Name":
+ current_name = value["Value"]
if current_name != name:
- results['name'] = gw_mgr.tag_cgw_name(
- results['gateway']['CustomerGateway']['CustomerGatewayId'],
- module.params['name'],
+ results["name"] = gw_mgr.tag_cgw_name(
+ results["gateway"]["CustomerGateway"]["CustomerGatewayId"],
+ module.params["name"],
)
- results['changed'] = True
+ results["changed"] = True
else:
if not module.check_mode:
- results['gateway'] = gw_mgr.ensure_cgw_present(
- module.params['bgp_asn'],
- module.params['ip_address'],
+ results["gateway"] = gw_mgr.ensure_cgw_present(
+ module.params["bgp_asn"],
+ module.params["ip_address"],
)
- results['name'] = gw_mgr.tag_cgw_name(
- results['gateway']['CustomerGateway']['CustomerGatewayId'],
- module.params['name'],
+ results["name"] = gw_mgr.tag_cgw_name(
+ results["gateway"]["CustomerGateway"]["CustomerGatewayId"],
+ module.params["name"],
)
- results['changed'] = True
+ results["changed"] = True
- elif module.params['state'] == 'absent':
- if existing['CustomerGateways']:
- existing['CustomerGateway'] = existing['CustomerGateways'][0]
- results['gateway'] = existing
+ elif module.params["state"] == "absent":
+ if existing["CustomerGateways"]:
+ existing["CustomerGateway"] = existing["CustomerGateways"][0]
+ results["gateway"] = existing
if not module.check_mode:
- results['gateway'] = gw_mgr.ensure_cgw_absent(
- existing['CustomerGateway']['CustomerGatewayId']
- )
- results['changed'] = True
+ results["gateway"] = gw_mgr.ensure_cgw_absent(existing["CustomerGateway"]["CustomerGatewayId"])
+ results["changed"] = True
pretty_results = camel_dict_to_snake_dict(results)
module.exit_json(**pretty_results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
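
The present-state branch above hinges on customer gateways being keyed by their public IP: repeated creates with the same IP return the existing resource. A sketch of that lookup-then-create flow, with placeholder region, IP, and ASN:

import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")  # illustrative region

def ensure_customer_gateway(ip_address="203.0.113.10", bgp_asn=65000):
    existing = ec2.describe_customer_gateways(
        Filters=[
            {"Name": "state", "Values": ["available"]},
            {"Name": "ip-address", "Values": [ip_address]},
        ]
    )["CustomerGateways"]
    if existing:
        # Reporting the existing gateway keeps the operation idempotent.
        return existing[0], False
    created = ec2.create_customer_gateway(
        Type="ipsec.1", PublicIp=ip_address, BgpAsn=bgp_asn
    )["CustomerGateway"]
    return created, True
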
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
index 429ba2083..18c1a366a 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_customer_gateway_info.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ec2_customer_gateway_info
version_added: 1.0.0
short_description: Gather information about customer gateways in AWS
description:
- - Gather information about customer gateways in AWS.
-author: Madhura Naniwadekar (@Madhura-CSI)
+ - Gather information about customer gateways in AWS.
+author:
+ - Madhura Naniwadekar (@Madhura-CSI)
options:
filters:
description:
@@ -28,13 +27,12 @@ options:
elements: str
default: []
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all customer gateways
@@ -55,9 +53,9 @@ EXAMPLES = r'''
- 'cgw-48841a09'
- 'cgw-fec021ce'
register: cust_gw_info
-'''
+"""
-RETURN = r'''
+RETURN = r"""
customer_gateways:
description: List of one or more customer gateways.
returned: always
@@ -78,60 +76,65 @@ customer_gateways:
"type": "ipsec.1"
}
]
-'''
+"""
import json
+
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict,
- )
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def date_handler(obj):
- return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+ return obj.isoformat() if hasattr(obj, "isoformat") else obj
def list_customer_gateways(connection, module):
params = dict()
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids')
+ params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+ params["CustomerGatewayIds"] = module.params.get("customer_gateway_ids")
try:
result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler))
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not describe customer gateways")
- snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']]
+ snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result["CustomerGateways"]]
if snaked_customer_gateways:
for customer_gateway in snaked_customer_gateways:
- customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', []))
- customer_gateway_name = customer_gateway['tags'].get('Name')
+ customer_gateway["tags"] = boto3_tag_list_to_ansible_dict(customer_gateway.get("tags", []))
+ customer_gateway_name = customer_gateway["tags"].get("Name")
if customer_gateway_name:
- customer_gateway['customer_gateway_name'] = customer_gateway_name
+ customer_gateway["customer_gateway_name"] = customer_gateway_name
module.exit_json(changed=False, customer_gateways=snaked_customer_gateways)
def main():
-
argument_spec = dict(
- customer_gateway_ids=dict(default=[], type='list', elements='str'),
- filters=dict(default={}, type='dict')
+ customer_gateway_ids=dict(default=[], type="list", elements="str"),
+ filters=dict(default={}, type="dict"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- mutually_exclusive=[['customer_gateway_ids', 'filters']],
- supports_check_mode=True)
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ["customer_gateway_ids", "filters"],
+ ],
+ supports_check_mode=True,
+ )
- connection = module.client('ec2')
+ connection = module.client("ec2")
list_customer_gateways(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
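
The json.loads(json.dumps(..., default=date_handler)) round-trip above is how the module strips non-serialisable datetime objects out of the boto3 response before returning it. A self-contained sketch (the CreateTime field is invented for the demonstration):

import json
from datetime import datetime

def date_handler(obj):
    # Fall back to ISO-8601 strings for datetime-like values.
    return obj.isoformat() if hasattr(obj, "isoformat") else obj

raw = {"CustomerGateways": [{"CreateTime": datetime(2024, 1, 1, 12, 30)}]}
clean = json.loads(json.dumps(raw, default=date_handler))
print(clean)  # {'CustomerGateways': [{'CreateTime': '2024-01-01T12:30:00'}]}
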
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py b/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py
index 67fb0f43b..9fd32711f 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_launch_template.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ec2_launch_template
version_added: 1.0.0
@@ -16,10 +15,6 @@ description:
- The M(amazon.aws.ec2_instance) and M(community.aws.autoscaling_group) modules can, instead of specifying all
parameters on those tasks, be passed a Launch Template which contains
settings like instance size, disk type, subnet, and more.
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
author:
- Ryan Scott Brown (@ryansb)
options:
@@ -373,7 +368,6 @@ options:
type: str
description: >
- Whether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)).
- - Requires botocore >= 1.21.29
choices: [enabled, disabled]
default: 'disabled'
instance_metadata_tags:
@@ -381,12 +375,15 @@ options:
type: str
description:
- Whether the instance tags are available (C(enabled)) via the metadata endpoint or not (C(disabled)).
- - Requires botocore >= 1.23.30
choices: [enabled, disabled]
default: 'disabled'
-'''
+extends_documentation_fragment:
+- amazon.aws.common.modules
+- amazon.aws.region.modules
+- amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create an ec2 launch template
community.aws.ec2_launch_template:
name: "my_template"
@@ -410,9 +407,9 @@ EXAMPLES = '''
state: absent
# This module does not yet allow deletion of specific versions of launch templates
-'''
+"""
-RETURN = '''
+RETURN = r"""
latest_version:
description: Latest available version of the launch template
returned: when state=present
@@ -421,82 +418,110 @@ default_version:
description: The version that will be used if only the template name is specified. Often this is the same as the latest version, but not always.
returned: when state=present
type: int
-'''
-import re
+"""
+
from uuid import uuid4
+try:
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import WaiterError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
-try:
- from botocore.exceptions import ClientError, BotoCoreError, WaiterError
-except ImportError:
- pass # caught by AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def determine_iam_role(module, name_or_arn):
- if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
- return {'arn': name_or_arn}
- iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ if validate_aws_arn(name_or_arn, service="iam", resource_type="instance-profile"):
+ return {"arn": name_or_arn}
+ iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff())
try:
role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
- return {'arn': role['InstanceProfile']['Arn']}
- except is_boto3_error_code('NoSuchEntity') as e:
- module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
+ return {"arn": role["InstanceProfile"]["Arn"]}
+ except is_boto3_error_code("NoSuchEntity") as e:
+ module.fail_json_aws(e, msg=f"Could not find instance_role {name_or_arn}")
except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
+ module.fail_json_aws(
+ e,
+ msg=f"An error occurred while searching for instance_role {name_or_arn}. Please try supplying the full ARN.",
+ )
def existing_templates(module):
- ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
matches = None
try:
- if module.params.get('template_id'):
- matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')], aws_retry=True)
- elif module.params.get('template_name'):
- matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')], aws_retry=True)
- except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e:
+ if module.params.get("template_id"):
+ matches = ec2.describe_launch_templates(
+ LaunchTemplateIds=[module.params.get("template_id")], aws_retry=True
+ )
+ elif module.params.get("template_name"):
+ matches = ec2.describe_launch_templates(
+ LaunchTemplateNames=[module.params.get("template_name")], aws_retry=True
+ )
+ except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException") as e:
# no named template was found, return nothing/empty versions
return None, []
- except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format(
- module.params.get('launch_template_id')))
- except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e: # pylint: disable=duplicate-except
+ except is_boto3_error_code("InvalidLaunchTemplateId.Malformed") as e: # pylint: disable=duplicate-except
module.fail_json_aws(
- e, msg='Launch template with ID {0} could not be found, please supply a name '
- 'instead so that a new template can be created'.format(module.params.get('launch_template_id')))
+ e,
+ msg=(
+ f"Launch template with ID {module.params.get('launch_template_id')} is not a valid ID. It should start"
+ " with `lt-....`"
+ ),
+ )
+ except is_boto3_error_code("InvalidLaunchTemplateId.NotFoundException") as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e,
+ msg=(
+ f"Launch template with ID {module.params.get('launch_template_id')} could not be found, please supply a"
+ " name instead so that a new template can be created"
+ ),
+ )
except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.')
+ module.fail_json_aws(e, msg="Could not check existing launch templates. This may be an IAM permission problem.")
else:
- template = matches['LaunchTemplates'][0]
- template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber']
+ template = matches["LaunchTemplates"][0]
+ template_id, template_version, template_default = (
+ template["LaunchTemplateId"],
+ template["LatestVersionNumber"],
+ template["DefaultVersionNumber"],
+ )
try:
- return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)['LaunchTemplateVersions']
+ return (
+ template,
+ ec2.describe_launch_template_versions(LaunchTemplateId=template_id, aws_retry=True)[
+ "LaunchTemplateVersions"
+ ],
+ )
except (ClientError, BotoCoreError, WaiterError) as e:
- module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id))
+ module.fail_json_aws(
+ e,
+ msg=f"Could not find launch template versions for {template['LaunchTemplateName']} (ID: {template_id}).",
+ )
def params_to_launch_data(module, template_params):
- if template_params.get('tags'):
- tag_list = ansible_dict_to_boto3_tag_list(template_params.get('tags'))
- template_params['tag_specifications'] = [
- {
- 'resource_type': r_type,
- 'tags': tag_list
- }
- for r_type in ('instance', 'volume')
+ if template_params.get("tags"):
+ tag_list = ansible_dict_to_boto3_tag_list(template_params.get("tags"))
+ template_params["tag_specifications"] = [
+ {"resource_type": r_type, "tags": tag_list} for r_type in ("instance", "volume")
]
- del template_params['tags']
- if module.params.get('iam_instance_profile'):
- template_params['iam_instance_profile'] = determine_iam_role(module, module.params['iam_instance_profile'])
+ del template_params["tags"]
+ if module.params.get("iam_instance_profile"):
+ template_params["iam_instance_profile"] = determine_iam_role(module, module.params["iam_instance_profile"])
params = snake_dict_to_camel_dict(
dict((k, v) for k, v in template_params.items() if v is not None),
capitalize_first=True,
@@ -505,71 +530,61 @@ def params_to_launch_data(module, template_params):
def delete_template(module):
- ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
template, template_versions = existing_templates(module)
deleted_versions = []
if template or template_versions:
- non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']]
+ non_default_versions = [to_text(t["VersionNumber"]) for t in template_versions if not t["DefaultVersion"]]
if non_default_versions:
try:
v_resp = ec2.delete_launch_template_versions(
- LaunchTemplateId=template['LaunchTemplateId'],
+ LaunchTemplateId=template["LaunchTemplateId"],
Versions=non_default_versions,
aws_retry=True,
)
- if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']:
- module.warn('Failed to delete template versions {0} on launch template {1}'.format(
- v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'],
- template['LaunchTemplateId'],
- ))
- deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']]
+ if v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"]:
+ module.warn(
+ f"Failed to delete template versions {v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']} on"
+ f" launch template {template['LaunchTemplateId']}"
+ )
+ deleted_versions = [
+ camel_dict_to_snake_dict(v) for v in v_resp["SuccessfullyDeletedLaunchTemplateVersions"]
+ ]
except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(template['LaunchTemplateId']))
+ module.fail_json_aws(
+ e,
+ msg=f"Could not delete existing versions of the launch template {template['LaunchTemplateId']}",
+ )
try:
resp = ec2.delete_launch_template(
- LaunchTemplateId=template['LaunchTemplateId'],
+ LaunchTemplateId=template["LaunchTemplateId"],
aws_retry=True,
)
except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId']))
+ module.fail_json_aws(e, msg=f"Could not delete launch template {template['LaunchTemplateId']}")
return {
- 'deleted_versions': deleted_versions,
- 'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']),
- 'changed': True,
+ "deleted_versions": deleted_versions,
+ "deleted_template": camel_dict_to_snake_dict(resp["LaunchTemplate"]),
+ "changed": True,
}
else:
- return {'changed': False}
+ return {"changed": False}
def create_or_update(module, template_options):
- ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidLaunchTemplateId.NotFound']))
+ ec2 = module.client(
+ "ec2", retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidLaunchTemplateId.NotFound"])
+ )
template, template_versions = existing_templates(module)
out = {}
lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options))
lt_data = scrub_none_parameters(lt_data, descend_into_lists=True)
- if lt_data.get('MetadataOptions'):
- if not module.botocore_at_least('1.23.30'):
- # fail only if enabled is requested
- if lt_data['MetadataOptions'].get('InstanceMetadataTags') == 'enabled':
- module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags')
- # pop if it's not requested to keep backwards compatibility.
- # otherwise the modules failes because parameters are set due default values
- lt_data['MetadataOptions'].pop('InstanceMetadataTags')
-
- if not module.botocore_at_least('1.21.29'):
- # fail only if enabled is requested
- if lt_data['MetadataOptions'].get('HttpProtocolIpv6') == 'enabled':
- module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6')
- # pop if it's not requested to keep backwards compatibility.
- # otherwise the modules failes because parameters are set due default values
- lt_data['MetadataOptions'].pop('HttpProtocolIpv6')
-
if not (template or template_versions):
# create a full new one
try:
resp = ec2.create_launch_template(
- LaunchTemplateName=module.params['template_name'],
+ LaunchTemplateName=module.params["template_name"],
LaunchTemplateData=lt_data,
ClientToken=uuid4().hex,
aws_retry=True,
@@ -577,26 +592,26 @@ def create_or_update(module, template_options):
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create launch template")
template, template_versions = existing_templates(module)
- out['changed'] = True
+ out["changed"] = True
elif template and template_versions:
most_recent = sorted(template_versions, key=lambda x: x["VersionNumber"])[-1]
if lt_data == most_recent["LaunchTemplateData"] and module.params["version_description"] == most_recent.get(
"VersionDescription", ""
):
- out['changed'] = False
+ out["changed"] = False
return out
try:
- if module.params.get('source_version') in (None, ''):
+ if module.params.get("source_version") in (None, ""):
resp = ec2.create_launch_template_version(
- LaunchTemplateId=template['LaunchTemplateId'],
+ LaunchTemplateId=template["LaunchTemplateId"],
LaunchTemplateData=lt_data,
ClientToken=uuid4().hex,
VersionDescription=str(module.params["version_description"]),
aws_retry=True,
)
- elif module.params.get('source_version') == 'latest':
+ elif module.params.get("source_version") == "latest":
resp = ec2.create_launch_template_version(
- LaunchTemplateId=template['LaunchTemplateId'],
+ LaunchTemplateId=template["LaunchTemplateId"],
LaunchTemplateData=lt_data,
ClientToken=uuid4().hex,
SourceVersion=str(most_recent["VersionNumber"]),
@@ -605,15 +620,22 @@ def create_or_update(module, template_options):
)
else:
try:
- int(module.params.get('source_version'))
+ int(module.params.get("source_version"))
except ValueError:
- module.fail_json(msg='source_version param was not a valid integer, got "{0}"'.format(module.params.get('source_version')))
+ module.fail_json(
+ msg=f"source_version param was not a valid integer, got \"{module.params.get('source_version')}\""
+ )
# get source template version
- source_version = next((v for v in template_versions if v['VersionNumber'] == int(module.params.get('source_version'))), None)
+ source_version = next(
+ (v for v in template_versions if v["VersionNumber"] == int(module.params.get("source_version"))),
+ None,
+ )
if source_version is None:
- module.fail_json(msg='source_version does not exist, got "{0}"'.format(module.params.get('source_version')))
+ module.fail_json(
+ msg=f"source_version does not exist, got \"{module.params.get('source_version')}\""
+ )
resp = ec2.create_launch_template_version(
- LaunchTemplateId=template['LaunchTemplateId'],
+ LaunchTemplateId=template["LaunchTemplateId"],
LaunchTemplateData=lt_data,
ClientToken=uuid4().hex,
SourceVersion=str(source_version["VersionNumber"]),
@@ -621,31 +643,33 @@ def create_or_update(module, template_options):
aws_retry=True,
)
- if module.params.get('default_version') in (None, ''):
+ if module.params.get("default_version") in (None, ""):
# no need to do anything, leave the existing version as default
pass
- elif module.params.get('default_version') == 'latest':
+ elif module.params.get("default_version") == "latest":
set_default = ec2.modify_launch_template(
- LaunchTemplateId=template['LaunchTemplateId'],
- DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']),
+ LaunchTemplateId=template["LaunchTemplateId"],
+ DefaultVersion=to_text(resp["LaunchTemplateVersion"]["VersionNumber"]),
ClientToken=uuid4().hex,
aws_retry=True,
)
else:
try:
- int(module.params.get('default_version'))
+ int(module.params.get("default_version"))
except ValueError:
- module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(module.params.get('default_version')))
+ module.fail_json(
+ msg=f"default_version param was not a valid integer, got \"{module.params.get('default_version')}\""
+ )
set_default = ec2.modify_launch_template(
- LaunchTemplateId=template['LaunchTemplateId'],
- DefaultVersion=to_text(int(module.params.get('default_version'))),
+ LaunchTemplateId=template["LaunchTemplateId"],
+ DefaultVersion=to_text(int(module.params.get("default_version"))),
ClientToken=uuid4().hex,
aws_retry=True,
)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create subsequent launch template version")
template, template_versions = existing_templates(module)
- out['changed'] = True
+ out["changed"] = True
return out
@@ -655,43 +679,38 @@ def format_module_output(module):
template = camel_dict_to_snake_dict(template)
template_versions = [camel_dict_to_snake_dict(v) for v in template_versions]
for v in template_versions:
- for ts in (v['launch_template_data'].get('tag_specifications') or []):
- ts['tags'] = boto3_tag_list_to_ansible_dict(ts.pop('tags'))
+ for ts in v["launch_template_data"].get("tag_specifications") or []:
+ ts["tags"] = boto3_tag_list_to_ansible_dict(ts.pop("tags"))
output.update(dict(template=template, versions=template_versions))
- output['default_template'] = [
- v for v in template_versions
- if v.get('default_version')
- ][0]
- output['latest_template'] = [
- v for v in template_versions
- if (
- v.get('version_number') and
- int(v['version_number']) == int(template['latest_version_number'])
- )
+ output["default_template"] = [v for v in template_versions if v.get("default_version")][0]
+ output["latest_template"] = [
+ v
+ for v in template_versions
+ if (v.get("version_number") and int(v["version_number"]) == int(template["latest_version_number"]))
][0]
- if "version_number" in output['default_template']:
- output['default_version'] = output['default_template']['version_number']
- if "version_number" in output['latest_template']:
- output['latest_version'] = output['latest_template']['version_number']
+ if "version_number" in output["default_template"]:
+ output["default_version"] = output["default_template"]["version_number"]
+ if "version_number" in output["latest_template"]:
+ output["latest_version"] = output["latest_template"]["version_number"]
return output
def main():
template_options = dict(
block_device_mappings=dict(
- type='list',
- elements='dict',
+ type="list",
+ elements="dict",
options=dict(
device_name=dict(),
ebs=dict(
- type='dict',
+ type="dict",
options=dict(
- delete_on_termination=dict(type='bool'),
- encrypted=dict(type='bool'),
- iops=dict(type='int'),
+ delete_on_termination=dict(type="bool"),
+ encrypted=dict(type="bool"),
+ iops=dict(type="int"),
kms_key_id=dict(),
snapshot_id=dict(),
- volume_size=dict(type='int'),
+ volume_size=dict(type="int"),
volume_type=dict(),
),
),
@@ -700,39 +719,39 @@ def main():
),
),
cpu_options=dict(
- type='dict',
+ type="dict",
options=dict(
- core_count=dict(type='int'),
- threads_per_core=dict(type='int'),
+ core_count=dict(type="int"),
+ threads_per_core=dict(type="int"),
),
),
credit_specification=dict(
- dict(type='dict'),
+ dict(type="dict"),
options=dict(
cpu_credits=dict(),
),
),
- disable_api_termination=dict(type='bool'),
- ebs_optimized=dict(type='bool'),
+ disable_api_termination=dict(type="bool"),
+ ebs_optimized=dict(type="bool"),
elastic_gpu_specifications=dict(
options=dict(type=dict()),
- type='list',
- elements='dict',
+ type="list",
+ elements="dict",
),
iam_instance_profile=dict(),
image_id=dict(),
- instance_initiated_shutdown_behavior=dict(choices=['stop', 'terminate']),
+ instance_initiated_shutdown_behavior=dict(choices=["stop", "terminate"]),
instance_market_options=dict(
- type='dict',
+ type="dict",
options=dict(
market_type=dict(),
spot_options=dict(
- type='dict',
+ type="dict",
options=dict(
- block_duration_minutes=dict(type='int'),
- instance_interruption_behavior=dict(choices=['hibernate', 'stop', 'terminate']),
+ block_duration_minutes=dict(type="int"),
+ instance_interruption_behavior=dict(choices=["hibernate", "stop", "terminate"]),
max_price=dict(),
- spot_instance_type=dict(choices=['one-time', 'persistent']),
+ spot_instance_type=dict(choices=["one-time", "persistent"]),
),
),
),
@@ -741,32 +760,30 @@ def main():
kernel_id=dict(),
key_name=dict(),
monitoring=dict(
- type='dict',
- options=dict(
- enabled=dict(type='bool')
- ),
+ type="dict",
+ options=dict(enabled=dict(type="bool")),
),
metadata_options=dict(
- type='dict',
+ type="dict",
options=dict(
- http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'),
- http_put_response_hop_limit=dict(type='int', default=1),
- http_tokens=dict(choices=['optional', 'required'], default='optional'),
- http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'),
- instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'),
- )
+ http_endpoint=dict(choices=["enabled", "disabled"], default="enabled"),
+ http_put_response_hop_limit=dict(type="int", default=1),
+ http_tokens=dict(choices=["optional", "required"], default="optional"),
+ http_protocol_ipv6=dict(choices=["disabled", "enabled"], default="disabled"),
+ instance_metadata_tags=dict(choices=["disabled", "enabled"], default="disabled"),
+ ),
),
network_interfaces=dict(
- type='list',
- elements='dict',
+ type="list",
+ elements="dict",
options=dict(
- associate_public_ip_address=dict(type='bool'),
- delete_on_termination=dict(type='bool'),
+ associate_public_ip_address=dict(type="bool"),
+ delete_on_termination=dict(type="bool"),
description=dict(),
- device_index=dict(type='int'),
- groups=dict(type='list', elements='str'),
- ipv6_address_count=dict(type='int'),
- ipv6_addresses=dict(type='list', elements='str'),
+ device_index=dict(type="int"),
+ groups=dict(type="list", elements="str"),
+ ipv6_address_count=dict(type="int"),
+ ipv6_addresses=dict(type="list", elements="str"),
network_interface_id=dict(),
private_ip_address=dict(),
subnet_id=dict(),
@@ -780,12 +797,12 @@ def main():
host_id=dict(),
tenancy=dict(),
),
- type='dict',
+ type="dict",
),
ram_disk_id=dict(),
- security_group_ids=dict(type='list', elements='str'),
- security_groups=dict(type='list', elements='str'),
- tags=dict(type='dict', aliases=['resource_tags']),
+ security_group_ids=dict(type="list", elements="str"),
+ security_groups=dict(type="list", elements="str"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
user_data=dict(),
)
@@ -803,25 +820,25 @@ def main():
module = AnsibleAWSModule(
argument_spec=arg_spec,
required_one_of=[
- ('template_name', 'template_id')
+ ("template_name", "template_id"),
],
- supports_check_mode=True
+ supports_check_mode=True,
)
- for interface in (module.params.get('network_interfaces') or []):
- if interface.get('ipv6_addresses'):
- interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']]
+ for interface in module.params.get("network_interfaces") or []:
+ if interface.get("ipv6_addresses"):
+ interface["ipv6_addresses"] = [{"ipv6_address": x} for x in interface["ipv6_addresses"]]
- if module.params.get('state') == 'present':
+ if module.params.get("state") == "present":
out = create_or_update(module, template_options)
out.update(format_module_output(module))
- elif module.params.get('state') == 'absent':
+ elif module.params.get("state") == "absent":
out = delete_template(module)
else:
- module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state')))
+ module.fail_json(msg=f"Unsupported value \"{module.params.get('state')}\" for `state` parameter")
module.exit_json(**out)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
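
Every mutating call above passes ClientToken=uuid4().hex, so a retried request cannot create a duplicate template version. A sketch of the same pattern against boto3 directly; the template ID, instance type, and source version are placeholders:

import boto3
from uuid import uuid4

ec2 = boto3.client("ec2")
token = uuid4().hex  # reusing one token makes retries of this call idempotent
resp = ec2.create_launch_template_version(
    LaunchTemplateId="lt-0123456789abcdef0",  # placeholder ID
    LaunchTemplateData={"InstanceType": "t3.micro"},
    SourceVersion="1",  # start from an existing version, as source_version does
    ClientToken=token,
)
print(resp["LaunchTemplateVersion"]["VersionNumber"])
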
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py b/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py
index c27917df9..3cdb5be21 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_placement_group.py
@@ -1,22 +1,21 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ec2_placement_group
version_added: 1.0.0
short_description: Create or delete an EC2 Placement Group
description:
- - Create an EC2 Placement Group; if the placement group already exists,
- nothing is done. Or, delete an existing placement group. If the placement
- group is absent, do nothing. See also
- U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
-author: "Brad Macpherson (@iiibrad)"
+ - Create an EC2 Placement Group; if the placement group already exists,
+ nothing is done. Or, delete an existing placement group. If the placement
+ group is absent, do nothing. See also
+ U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
+author:
+ - "Brad Macpherson (@iiibrad)"
options:
name:
description:
@@ -45,12 +44,12 @@ options:
choices: [ 'cluster', 'spread', 'partition' ]
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide
# for details.
@@ -76,11 +75,9 @@ EXAMPLES = '''
community.aws.ec2_placement_group:
name: my-cluster
state: absent
+"""
-'''
-
-
-RETURN = '''
+RETURN = r"""
placement_group:
description: Placement group attributes
returned: when state != absent
@@ -98,17 +95,17 @@ placement_group:
description: PG strategy
type: str
sample: "cluster"
-
-'''
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
@AWSRetry.exponential_backoff()
@@ -118,40 +115,32 @@ def search_placement_group(connection, module):
"""
name = module.params.get("name")
try:
- response = connection.describe_placement_groups(
- Filters=[{
- "Name": "group-name",
- "Values": [name]
- }])
+ response = connection.describe_placement_groups(Filters=[{"Name": "group-name", "Values": [name]}])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e,
- msg="Couldn't find placement group named [%s]" % name)
+ module.fail_json_aws(e, msg=f"Couldn't find placement group named [{name}]")
- if len(response['PlacementGroups']) != 1:
+ if len(response["PlacementGroups"]) != 1:
return None
else:
- placement_group = response['PlacementGroups'][0]
+ placement_group = response["PlacementGroups"][0]
return {
- "name": placement_group['GroupName'],
- "state": placement_group['State'],
- "strategy": placement_group['Strategy'],
+ "name": placement_group["GroupName"],
+ "state": placement_group["State"],
+ "strategy": placement_group["Strategy"],
}
-@AWSRetry.exponential_backoff(catch_extra_error_codes=['InvalidPlacementGroup.Unknown'])
+@AWSRetry.exponential_backoff(catch_extra_error_codes=["InvalidPlacementGroup.Unknown"])
def get_placement_group_information(connection, name):
"""
Retrieve information about a placement group.
"""
- response = connection.describe_placement_groups(
- GroupNames=[name]
- )
- placement_group = response['PlacementGroups'][0]
+ response = connection.describe_placement_groups(GroupNames=[name])
+ placement_group = response["PlacementGroups"][0]
return {
- "name": placement_group['GroupName'],
- "state": placement_group['State'],
- "strategy": placement_group['Strategy'],
+ "name": placement_group["GroupName"],
+ "state": placement_group["State"],
+ "strategy": placement_group["Strategy"],
}
@@ -161,32 +150,34 @@ def create_placement_group(connection, module):
strategy = module.params.get("strategy")
partition_count = module.params.get("partition_count")
- if strategy != 'partition' and partition_count:
- module.fail_json(
- msg="'partition_count' can only be set when strategy is set to 'partition'.")
+ if strategy != "partition" and partition_count:
+ module.fail_json(msg="'partition_count' can only be set when strategy is set to 'partition'.")
params = {}
- params['GroupName'] = name
- params['Strategy'] = strategy
+ params["GroupName"] = name
+ params["Strategy"] = strategy
if partition_count:
- params['PartitionCount'] = partition_count
- params['DryRun'] = module.check_mode
+ params["PartitionCount"] = partition_count
+ params["DryRun"] = module.check_mode
try:
connection.create_placement_group(**params)
- except is_boto3_error_code('DryRunOperation'):
- module.exit_json(changed=True, placement_group={
- "name": name,
- "state": 'DryRun',
- "strategy": strategy,
- })
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(
- e,
- msg="Couldn't create placement group [%s]" % name)
-
- module.exit_json(changed=True,
- placement_group=get_placement_group_information(connection, name))
+ except is_boto3_error_code("DryRunOperation"):
+ module.exit_json(
+ changed=True,
+ placement_group={
+ "name": name,
+ "state": "DryRun",
+ "strategy": strategy,
+ },
+ )
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Couldn't create placement group [{name}]")
+
+ module.exit_json(changed=True, placement_group=get_placement_group_information(connection, name))
@AWSRetry.exponential_backoff()
@@ -194,52 +185,42 @@ def delete_placement_group(connection, module):
name = module.params.get("name")
try:
- connection.delete_placement_group(
- GroupName=name, DryRun=module.check_mode)
+ connection.delete_placement_group(GroupName=name, DryRun=module.check_mode)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e,
- msg="Couldn't delete placement group [%s]" % name)
+ module.fail_json_aws(e, msg=f"Couldn't delete placement group [{name}]")
module.exit_json(changed=True)
def main():
argument_spec = dict(
- name=dict(required=True, type='str'),
- partition_count=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent']),
- strategy=dict(default='cluster', choices=['cluster', 'spread', 'partition'])
+ name=dict(required=True, type="str"),
+ partition_count=dict(type="int"),
+ state=dict(default="present", choices=["present", "absent"]),
+ strategy=dict(default="cluster", choices=["cluster", "spread", "partition"]),
)
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- connection = module.client('ec2')
+ connection = module.client("ec2")
state = module.params.get("state")
- if state == 'present':
+ if state == "present":
placement_group = search_placement_group(connection, module)
if placement_group is None:
create_placement_group(connection, module)
else:
strategy = module.params.get("strategy")
- if placement_group['strategy'] == strategy:
- module.exit_json(
- changed=False, placement_group=placement_group)
+ if placement_group["strategy"] == strategy:
+ module.exit_json(changed=False, placement_group=placement_group)
else:
name = module.params.get("name")
module.fail_json(
- msg=("Placement group '{}' exists, can't change strategy" +
- " from '{}' to '{}'").format(
- name,
- placement_group['strategy'],
- strategy))
+ msg=f"Placement group '{name}' exists, can't change strategy from '{placement_group['strategy']}' to '{strategy}'"
+ )
- elif state == 'absent':
+ elif state == "absent":
placement_group = search_placement_group(connection, module)
if placement_group is None:
module.exit_json(changed=False)
@@ -247,5 +228,5 @@ def main():
delete_placement_group(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
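
Check mode above is implemented by passing DryRun=module.check_mode and treating the DryRunOperation error code as success. The pattern in isolation, with a placeholder group name:

import boto3
from botocore.exceptions import ClientError

ec2 = boto3.client("ec2")
try:
    ec2.create_placement_group(GroupName="my-cluster", Strategy="cluster", DryRun=True)
except ClientError as e:
    if e.response["Error"]["Code"] == "DryRunOperation":
        # The request would have succeeded; check mode reports this as changed.
        print("request would have succeeded")
    else:
        raise
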
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py b/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py
index d22f133ae..05b37488c 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_placement_group_info.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ec2_placement_group_info
version_added: 1.0.0
short_description: List EC2 Placement Group(s) details
description:
- - List details of EC2 Placement Group(s).
-author: "Brad Macpherson (@iiibrad)"
+ - List details of EC2 Placement Group(s).
+author:
+ - "Brad Macpherson (@iiibrad)"
options:
names:
description:
@@ -24,13 +23,12 @@ options:
required: false
default: []
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details or the AWS region,
# see the AWS Guide for details.
@@ -41,18 +39,17 @@ EXAMPLES = r'''
- name: List two placement groups.
community.aws.ec2_placement_group_info:
names:
- - my-cluster
- - my-other-cluster
+ - my-cluster
+ - my-other-cluster
register: specific_ec2_placement_groups
- ansible.builtin.debug:
msg: >
{{ specific_ec2_placement_groups | json_query("[?name=='my-cluster']") }}
+"""
-'''
-
-RETURN = r'''
+RETURN = r"""
placement_groups:
description: Placement group attributes
returned: always
@@ -70,57 +67,61 @@ placement_groups:
description: PG strategy
type: str
sample: "cluster"
+"""
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
try:
- from botocore.exceptions import (BotoCoreError, ClientError)
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def get_placement_groups_details(connection, module):
names = module.params.get("names")
try:
if len(names) > 0:
response = connection.describe_placement_groups(
- Filters=[{
- "Name": "group-name",
- "Values": names
- }])
+ Filters=[
+ {
+ "Name": "group-name",
+ "Values": names,
+ }
+ ]
+ )
else:
response = connection.describe_placement_groups()
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(
- e,
- msg="Couldn't find placement groups named [%s]" % names)
+ module.fail_json_aws(e, msg=f"Couldn't find placement groups named [{names}]")
results = []
- for placement_group in response['PlacementGroups']:
- results.append({
- "name": placement_group['GroupName'],
- "state": placement_group['State'],
- "strategy": placement_group['Strategy'],
- })
+ for placement_group in response["PlacementGroups"]:
+ results.append(
+ {
+ "name": placement_group["GroupName"],
+ "state": placement_group["State"],
+ "strategy": placement_group["Strategy"],
+ }
+ )
return results
def main():
argument_spec = dict(
- names=dict(type='list', default=[], elements='str')
+ names=dict(type="list", default=[], elements="str"),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- supports_check_mode=True
+ supports_check_mode=True,
)
- connection = module.client('ec2')
+ connection = module.client("ec2")
placement_groups = get_placement_groups_details(connection, module)
module.exit_json(changed=False, placement_groups=placement_groups)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
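
Worth noting why these modules query by filter rather than by name: the two describe forms fail differently for unknown groups, as suggested by the retry on InvalidPlacementGroup.Unknown above. A sketch of the contrast, with a placeholder group name:

import boto3
from botocore.exceptions import ClientError

ec2 = boto3.client("ec2")

# Filter form: an unknown name simply yields an empty result list.
by_filter = ec2.describe_placement_groups(
    Filters=[{"Name": "group-name", "Values": ["no-such-group"]}]
)
print(by_filter["PlacementGroups"])  # []

# Name form: an unknown name raises an error instead, which is why
# get_placement_group_information() retries on that error code.
try:
    ec2.describe_placement_groups(GroupNames=["no-such-group"])
except ClientError as e:
    print(e.response["Error"]["Code"])  # InvalidPlacementGroup.Unknown
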
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py b/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py
index f45be4417..2cf994caa 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_snapshot_copy.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ec2_snapshot_copy
version_added: 1.0.0
@@ -57,12 +54,12 @@ options:
author:
- Deepak Kothandan (@Deepakkothandan) <deepak.kdy@gmail.com>
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Basic Snapshot Copy
community.aws.ec2_snapshot_copy:
source_region: eu-central-1
@@ -84,7 +81,7 @@ EXAMPLES = '''
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
tags:
- Name: Snapshot-Name
+ Name: Snapshot-Name
- name: Encrypted Snapshot copy
community.aws.ec2_snapshot_copy:
@@ -100,24 +97,25 @@ EXAMPLES = '''
source_snapshot_id: snap-xxxxxxx
encrypted: true
kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
-'''
+"""
-RETURN = '''
+RETURN = r"""
snapshot_id:
description: snapshot id of the newly created snapshot
returned: when snapshot copy is successful
type: str
sample: "snap-e9095e8c"
-'''
+"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def copy_snapshot(module, ec2):
"""
@@ -128,34 +126,33 @@ def copy_snapshot(module, ec2):
"""
params = {
- 'SourceRegion': module.params.get('source_region'),
- 'SourceSnapshotId': module.params.get('source_snapshot_id'),
- 'Description': module.params.get('description')
+ "SourceRegion": module.params.get("source_region"),
+ "SourceSnapshotId": module.params.get("source_snapshot_id"),
+ "Description": module.params.get("description"),
}
- if module.params.get('encrypted'):
- params['Encrypted'] = True
+ if module.params.get("encrypted"):
+ params["Encrypted"] = True
- if module.params.get('kms_key_id'):
- params['KmsKeyId'] = module.params.get('kms_key_id')
+ if module.params.get("kms_key_id"):
+ params["KmsKeyId"] = module.params.get("kms_key_id")
- if module.params.get('tags'):
- params['TagSpecifications'] = boto3_tag_specifications(module.params.get('tags'), types=['snapshot'])
+ if module.params.get("tags"):
+ params["TagSpecifications"] = boto3_tag_specifications(module.params.get("tags"), types=["snapshot"])
try:
- snapshot_id = ec2.copy_snapshot(**params)['SnapshotId']
- if module.params.get('wait'):
+ snapshot_id = ec2.copy_snapshot(**params)["SnapshotId"]
+ if module.params.get("wait"):
delay = 15
            # Add one to max_attempts as wait() increments
# its counter before assessing it for time.sleep()
- max_attempts = (module.params.get('wait_timeout') // delay) + 1
- ec2.get_waiter('snapshot_completed').wait(
- SnapshotIds=[snapshot_id],
- WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)
+ max_attempts = (module.params.get("wait_timeout") // delay) + 1
+ ec2.get_waiter("snapshot_completed").wait(
+ SnapshotIds=[snapshot_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='An error occurred waiting for the snapshot to become available.')
+ module.fail_json_aws(e, msg="An error occurred waiting for the snapshot to become available.")
module.exit_json(changed=True, snapshot_id=snapshot_id)
@@ -164,23 +161,23 @@ def main():
argument_spec = dict(
source_region=dict(required=True),
source_snapshot_id=dict(required=True),
- description=dict(default=''),
- encrypted=dict(type='bool', default=False, required=False),
- kms_key_id=dict(type='str', required=False),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=600),
- tags=dict(type='dict', aliases=['resource_tags']),
+ description=dict(default=""),
+ encrypted=dict(type="bool", default=False, required=False),
+ kms_key_id=dict(type="str", required=False),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=600),
+ tags=dict(type="dict", aliases=["resource_tags"]),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
try:
- client = module.client('ec2')
+ client = module.client("ec2")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
copy_snapshot(module, client)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
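
The wait logic in copy_snapshot() above derives the waiter's MaxAttempts from wait_timeout and a fixed 15-second delay, adding one because the waiter increments its attempt counter before sleeping. A minimal sketch of the same arithmetic; the helper name is assumed for illustration:

def waiter_config(wait_timeout, delay=15):
    # wait() increments its counter before assessing it for time.sleep(),
    # so one extra attempt is needed to cover the full timeout window.
    max_attempts = (wait_timeout // delay) + 1
    return {"Delay": delay, "MaxAttempts": max_attempts}

# With the module default of wait_timeout=600 this yields 41 attempts,
# i.e. up to 600 seconds of polling at 15-second intervals.
print(waiter_config(600))  # {'Delay': 15, 'MaxAttempts': 41}
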
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py
index 298646cf8..19876984d 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: ec2_transit_gateway
short_description: Create and delete AWS Transit Gateways
version_added: 1.0.0
@@ -74,13 +72,13 @@ options:
author:
- "Bob Boldin (@BobBoldin)"
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
- amazon.aws.tags
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new transit gateway using defaults
community.aws.ec2_transit_gateway:
state: present
@@ -93,9 +91,9 @@ EXAMPLES = '''
asn: 64514
auto_associate: false
auto_propagate: false
- dns_support: True
+ dns_support: true
description: "nonprod transit gateway"
- purge_tags: False
+ purge_tags: false
state: present
region: us-east-1
tags:
@@ -114,9 +112,9 @@ EXAMPLES = '''
region: ap-southeast-2
transit_gateway_id: tgw-3a9aa123
register: deleted_tgw
-'''
+"""
-RETURN = '''
+RETURN = r"""
transit_gateway:
description: The attributes of the transit gateway.
type: complex
@@ -210,49 +208,53 @@ transit_gateway:
returned: always
type: str
sample: tgw-3a9aa123
-'''
+"""
+
+from time import sleep
+from time import time
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # handled by imported AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from time import sleep, time
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-class AnsibleEc2Tgw(object):
+class AnsibleEc2Tgw(object):
def __init__(self, module, results):
self._module = module
self._results = results
retry_decorator = AWSRetry.jittered_backoff(
- catch_extra_error_codes=['IncorrectState'],
+ catch_extra_error_codes=["IncorrectState"],
)
- connection = module.client('ec2', retry_decorator=retry_decorator)
+ connection = module.client("ec2", retry_decorator=retry_decorator)
self._connection = connection
self._check_mode = self._module.check_mode
def process(self):
- """ Process the request based on state parameter .
- state = present will search for an existing tgw based and return the object data.
- if no object is found it will be created
-
- state = absent will attempt to remove the tgw however will fail if it still has
- attachments or associations
- """
- description = self._module.params.get('description')
- state = self._module.params.get('state', 'present')
- tgw_id = self._module.params.get('transit_gateway_id')
-
- if state == 'present':
+ """Process the request based on state parameter .
+ state = present will search for an existing tgw based and return the object data.
+ if no object is found it will be created
+
+ state = absent will attempt to remove the tgw however will fail if it still has
+ attachments or associations
+ """
+ description = self._module.params.get("description")
+ state = self._module.params.get("state", "present")
+ tgw_id = self._module.params.get("transit_gateway_id")
+
+ if state == "present":
self.ensure_tgw_present(tgw_id, description)
- elif state == 'absent':
+ elif state == "absent":
self.ensure_tgw_absent(tgw_id, description)
def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True):
@@ -276,13 +278,13 @@ class AnsibleEc2Tgw(object):
if transit_gateway:
if self._check_mode:
- transit_gateway['state'] = status
+ transit_gateway["state"] = status
- if transit_gateway.get('state') == status:
+ if transit_gateway.get("state") == status:
status_achieved = True
break
- elif transit_gateway.get('state') == 'failed':
+ elif transit_gateway.get("state") == "failed":
break
else:
@@ -292,13 +294,12 @@ class AnsibleEc2Tgw(object):
self._module.fail_json_aws(e)
if not status_achieved:
- self._module.fail_json(
- msg="Wait time out reached, while waiting for results")
+ self._module.fail_json(msg="Wait time out reached, while waiting for results")
return transit_gateway
def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True):
- """ search for an existing tgw by either tgw_id or description
+ """search for an existing tgw by either tgw_id or description
:param tgw_id: The AWS id of the transit gateway
:param description: The description of the transit gateway.
:param skip_deleted: ignore deleted transit gateways
@@ -306,7 +307,7 @@ class AnsibleEc2Tgw(object):
"""
filters = []
if tgw_id:
- filters = ansible_dict_to_boto3_filter_list({'transit-gateway-id': tgw_id})
+ filters = ansible_dict_to_boto3_filter_list({"transit-gateway-id": tgw_id})
try:
response = AWSRetry.exponential_backoff()(self._connection.describe_transit_gateways)(Filters=filters)
@@ -316,20 +317,21 @@ class AnsibleEc2Tgw(object):
tgw = None
tgws = []
- if len(response.get('TransitGateways', [])) == 1 and tgw_id:
- if (response['TransitGateways'][0]['State'] != 'deleted') or not skip_deleted:
- tgws.extend(response['TransitGateways'])
+ if len(response.get("TransitGateways", [])) == 1 and tgw_id:
+ if (response["TransitGateways"][0]["State"] != "deleted") or not skip_deleted:
+ tgws.extend(response["TransitGateways"])
- for gateway in response.get('TransitGateways', []):
- if description == gateway['Description'] and gateway['State'] != 'deleted':
+ for gateway in response.get("TransitGateways", []):
+ if description == gateway["Description"] and gateway["State"] != "deleted":
tgws.append(gateway)
if len(tgws) > 1:
self._module.fail_json(
- msg='EC2 returned more than one transit Gateway for description {0}, aborting'.format(description))
+ msg=f"EC2 returned more than one transit Gateway for description {description}, aborting"
+ )
elif tgws:
- tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=['Tags'])
- tgw['tags'] = boto3_tag_list_to_ansible_dict(tgws[0]['Tags'])
+ tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=["Tags"])
+ tgw["tags"] = boto3_tag_list_to_ansible_dict(tgws[0]["Tags"])
return tgw
@@ -349,31 +351,31 @@ class AnsibleEc2Tgw(object):
:return dict: transit gateway object
"""
options = dict()
- wait = self._module.params.get('wait')
- wait_timeout = self._module.params.get('wait_timeout')
+ wait = self._module.params.get("wait")
+ wait_timeout = self._module.params.get("wait_timeout")
- if self._module.params.get('asn'):
- options['AmazonSideAsn'] = self._module.params.get('asn')
+ if self._module.params.get("asn"):
+ options["AmazonSideAsn"] = self._module.params.get("asn")
- options['AutoAcceptSharedAttachments'] = self.enable_option_flag(self._module.params.get('auto_attach'))
- options['DefaultRouteTableAssociation'] = self.enable_option_flag(self._module.params.get('auto_associate'))
- options['DefaultRouteTablePropagation'] = self.enable_option_flag(self._module.params.get('auto_propagate'))
- options['VpnEcmpSupport'] = self.enable_option_flag(self._module.params.get('vpn_ecmp_support'))
- options['DnsSupport'] = self.enable_option_flag(self._module.params.get('dns_support'))
+ options["AutoAcceptSharedAttachments"] = self.enable_option_flag(self._module.params.get("auto_attach"))
+ options["DefaultRouteTableAssociation"] = self.enable_option_flag(self._module.params.get("auto_associate"))
+ options["DefaultRouteTablePropagation"] = self.enable_option_flag(self._module.params.get("auto_propagate"))
+ options["VpnEcmpSupport"] = self.enable_option_flag(self._module.params.get("vpn_ecmp_support"))
+ options["DnsSupport"] = self.enable_option_flag(self._module.params.get("dns_support"))
try:
response = self._connection.create_transit_gateway(Description=description, Options=options)
except (ClientError, BotoCoreError) as e:
self._module.fail_json_aws(e)
- tgw_id = response['TransitGateway']['TransitGatewayId']
+ tgw_id = response["TransitGateway"]["TransitGatewayId"]
if wait:
result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="available")
else:
result = self.get_matching_tgw(tgw_id=tgw_id)
- self._results['msg'] = (' Transit gateway {0} created'.format(result['transit_gateway_id']))
+ self._results["msg"] = f" Transit gateway {result['transit_gateway_id']} created"
return result
@@ -384,8 +386,8 @@ class AnsibleEc2Tgw(object):
:param tgw_id: The id of the transit gateway
:return dict: transit gateway object
"""
- wait = self._module.params.get('wait')
- wait_timeout = self._module.params.get('wait_timeout')
+ wait = self._module.params.get("wait")
+ wait_timeout = self._module.params.get("wait_timeout")
try:
response = self._connection.delete_transit_gateway(TransitGatewayId=tgw_id)
@@ -393,11 +395,13 @@ class AnsibleEc2Tgw(object):
self._module.fail_json_aws(e)
if wait:
- result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False)
+ result = self.wait_for_status(
+ wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False
+ )
else:
result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False)
- self._results['msg'] = (' Transit gateway {0} deleted'.format(tgw_id))
+ self._results["msg"] = f" Transit gateway {tgw_id} deleted"
return result
@@ -414,25 +418,27 @@ class AnsibleEc2Tgw(object):
if tgw is None:
if self._check_mode:
- self._results['changed'] = True
- self._results['transit_gateway_id'] = None
+ self._results["changed"] = True
+ self._results["transit_gateway_id"] = None
return self._results
try:
if not description:
self._module.fail_json(msg="Failed to create Transit Gateway: description argument required")
tgw = self.create_tgw(description)
- self._results['changed'] = True
+ self._results["changed"] = True
except (BotoCoreError, ClientError) as e:
- self._module.fail_json_aws(e, msg='Unable to create Transit Gateway')
-
- self._results['changed'] |= ensure_ec2_tags(
- self._connection, self._module, tgw['transit_gateway_id'],
- tags=self._module.params.get('tags'),
- purge_tags=self._module.params.get('purge_tags'),
+ self._module.fail_json_aws(e, msg="Unable to create Transit Gateway")
+
+ self._results["changed"] |= ensure_ec2_tags(
+ self._connection,
+ self._module,
+ tgw["transit_gateway_id"],
+ tags=self._module.params.get("tags"),
+ purge_tags=self._module.params.get("purge_tags"),
)
- self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'])
+ self._results["transit_gateway"] = self.get_matching_tgw(tgw_id=tgw["transit_gateway_id"])
return self._results
@@ -444,21 +450,22 @@ class AnsibleEc2Tgw(object):
:param description: The description of the transit gateway.
        :return dict: transit gateway object
"""
- self._results['transit_gateway_id'] = None
+ self._results["transit_gateway_id"] = None
tgw = self.get_matching_tgw(tgw_id, description)
if tgw is not None:
if self._check_mode:
- self._results['changed'] = True
+ self._results["changed"] = True
return self._results
try:
- tgw = self.delete_tgw(tgw_id=tgw['transit_gateway_id'])
- self._results['changed'] = True
- self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'],
- skip_deleted=False)
+ tgw = self.delete_tgw(tgw_id=tgw["transit_gateway_id"])
+ self._results["changed"] = True
+ self._results["transit_gateway"] = self.get_matching_tgw(
+ tgw_id=tgw["transit_gateway_id"], skip_deleted=False
+ )
except (BotoCoreError, ClientError) as e:
- self._module.fail_json_aws(e, msg='Unable to delete Transit Gateway')
+ self._module.fail_json_aws(e, msg="Unable to delete Transit Gateway")
return self._results
@@ -470,24 +477,24 @@ def setup_module_object():
"""
argument_spec = dict(
- asn=dict(type='int'),
- auto_associate=dict(type='bool', default=True),
- auto_attach=dict(type='bool', default=False),
- auto_propagate=dict(type='bool', default=True),
- description=dict(type='str'),
- dns_support=dict(type='bool', default=True),
- purge_tags=dict(type='bool', default=True),
- state=dict(default='present', choices=['present', 'absent']),
- tags=dict(type='dict', aliases=['resource_tags']),
- transit_gateway_id=dict(type='str'),
- vpn_ecmp_support=dict(type='bool', default=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=300)
+ asn=dict(type="int"),
+ auto_associate=dict(type="bool", default=True),
+ auto_attach=dict(type="bool", default=False),
+ auto_propagate=dict(type="bool", default=True),
+ description=dict(type="str"),
+ dns_support=dict(type="bool", default=True),
+ purge_tags=dict(type="bool", default=True),
+ state=dict(default="present", choices=["present", "absent"]),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ transit_gateway_id=dict(type="str"),
+ vpn_ecmp_support=dict(type="bool", default=True),
+ wait=dict(type="bool", default=True),
+ wait_timeout=dict(type="int", default=300),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- required_one_of=[('description', 'transit_gateway_id')],
+ required_one_of=[("description", "transit_gateway_id")],
supports_check_mode=True,
)
@@ -495,12 +502,9 @@ def setup_module_object():
def main():
-
module = setup_module_object()
- results = dict(
- changed=False
- )
+ results = dict(changed=False)
tgw_manager = AnsibleEc2Tgw(module=module, results=results)
tgw_manager.process()
@@ -508,5 +512,5 @@ def main():
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
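
create_tgw() above maps the module's boolean parameters onto the EC2 CreateTransitGateway options through enable_option_flag(), whose body is not part of this hunk. The EC2 API expects the strings "enable" and "disable" for these options, so a plausible reconstruction, offered only as a sketch:

def enable_option_flag(flag):
    # Hypothetical reconstruction: the EC2 CreateTransitGateway options
    # (AutoAcceptSharedAttachments, DnsSupport, ...) take the strings
    # "enable"/"disable" rather than booleans.
    return "enable" if flag else "disable"

options = {
    "AutoAcceptSharedAttachments": enable_option_flag(False),
    "DefaultRouteTableAssociation": enable_option_flag(True),
    "DnsSupport": enable_option_flag(True),
}
print(options)
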
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py
index 5ce3dc6a4..b25346b84 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_info.py
@@ -1,19 +1,17 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: ec2_transit_gateway_info
short_description: Gather information about ec2 transit gateways in AWS
version_added: 1.0.0
description:
- - Gather information about ec2 transit gateways in AWS
-author: "Bob Boldin (@BobBoldin)"
+ - Gather information about ec2 transit gateways in AWS
+author:
+ - "Bob Boldin (@BobBoldin)"
options:
transit_gateway_ids:
description:
@@ -29,13 +27,12 @@ options:
type: dict
default: {}
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather info about all transit gateways
@@ -57,9 +54,9 @@ EXAMPLES = r'''
transit_gateway_ids:
- tgw-02c42332e6b7da829
- tgw-03c53443d5a8cb716
-'''
+"""
-RETURN = r'''
+RETURN = r"""
transit_gateways:
description: >
Transit gateways that match the provided filters. Each element consists of a dict with all the information
@@ -162,7 +159,7 @@ transit_gateways:
returned: always
type: str
sample: "tgw-02c42332e6b7da829"
-'''
+"""
try:
import botocore
@@ -171,19 +168,19 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-class AnsibleEc2TgwInfo(object):
+class AnsibleEc2TgwInfo(object):
def __init__(self, module, results):
self._module = module
self._results = results
- self._connection = self._module.client('ec2')
+ self._connection = self._module.client("ec2")
self._check_mode = self._module.check_mode
@AWSRetry.exponential_backoff()
@@ -195,8 +192,8 @@ class AnsibleEc2TgwInfo(object):
connection : boto3 client connection object
"""
# collect parameters
- filters = ansible_dict_to_boto3_filter_list(self._module.params['filters'])
- transit_gateway_ids = self._module.params['transit_gateway_ids']
+ filters = ansible_dict_to_boto3_filter_list(self._module.params["filters"])
+ transit_gateway_ids = self._module.params["transit_gateway_ids"]
# init empty list for return vars
transit_gateway_info = list()
@@ -204,17 +201,18 @@ class AnsibleEc2TgwInfo(object):
# Get the basic transit gateway info
try:
response = self._connection.describe_transit_gateways(
- TransitGatewayIds=transit_gateway_ids, Filters=filters)
- except is_boto3_error_code('InvalidTransitGatewayID.NotFound'):
- self._results['transit_gateways'] = []
+ TransitGatewayIds=transit_gateway_ids, Filters=filters
+ )
+ except is_boto3_error_code("InvalidTransitGatewayID.NotFound"):
+ self._results["transit_gateways"] = []
return
- for transit_gateway in response['TransitGateways']:
- transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=['Tags']))
+ for transit_gateway in response["TransitGateways"]:
+ transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=["Tags"]))
# convert tag list to ansible dict
- transit_gateway_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(transit_gateway.get('Tags', []))
+ transit_gateway_info[-1]["tags"] = boto3_tag_list_to_ansible_dict(transit_gateway.get("Tags", []))
- self._results['transit_gateways'] = transit_gateway_info
+ self._results["transit_gateways"] = transit_gateway_info
return
@@ -225,8 +223,8 @@ def setup_module_object():
"""
argument_spec = dict(
- transit_gateway_ids=dict(type='list', default=[], elements='str', aliases=['transit_gateway_id']),
- filters=dict(type='dict', default={})
+ transit_gateway_ids=dict(type="list", default=[], elements="str", aliases=["transit_gateway_id"]),
+ filters=dict(type="dict", default={}),
)
module = AnsibleAWSModule(
@@ -238,12 +236,9 @@ def setup_module_object():
def main():
-
module = setup_module_object()
- results = dict(
- changed=False
- )
+ results = dict(changed=False)
tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results)
try:
@@ -254,5 +249,5 @@ def main():
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
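
describe_transit_gateways() above post-processes each result with camel_dict_to_snake_dict() and boto3_tag_list_to_ansible_dict(). A self-contained sketch of the tag conversion only, with sample data; the real module_utils implementation is more general:

def boto3_tag_list_to_dict(tag_list):
    # EC2 returns tags as [{"Key": ..., "Value": ...}]; Ansible modules
    # expose them to the user as a plain dict instead.
    return {tag["Key"]: tag["Value"] for tag in tag_list or []}

gateway = {
    "TransitGatewayId": "tgw-02c42332e6b7da829",
    "Tags": [{"Key": "Name", "Value": "nonprod"}, {"Key": "Env", "Value": "dev"}],
}
print(boto3_tag_list_to_dict(gateway["Tags"]))
# {'Name': 'nonprod', 'Env': 'dev'}
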
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py
index 554059021..cfb6809a8 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: ec2_transit_gateway_vpc_attachment
short_description: Create and delete AWS Transit Gateway VPC attachments
version_added: 4.0.0
@@ -98,26 +96,26 @@ options:
author:
- "Mark Chappell (@tremble)"
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Create a Transit Gateway attachment
- community.aws.ec2_transit_gateway_vpc_attachment:
state: present
transit_gateway: 'tgw-123456789abcdef01'
name: AnsibleTest-1
subnets:
- - subnet-00000000000000000
- - subnet-11111111111111111
- - subnet-22222222222222222
- ipv6_support: True
- purge_subnets: True
- dns_support: True
- appliance_mode_support: True
+ - subnet-00000000000000000
+ - subnet-11111111111111111
+ - subnet-22222222222222222
+ ipv6_support: true
+ purge_subnets: true
+ dns_support: true
+ appliance_mode_support: true
tags:
TestTag: changed data in Test Tag
@@ -126,18 +124,18 @@ EXAMPLES = '''
state: present
id: 'tgw-attach-0c0c5fd0b0f01d1c9'
name: AnsibleTest-1
- ipv6_support: True
- purge_subnets: False
- dns_support: False
- appliance_mode_support: True
+ ipv6_support: true
+ purge_subnets: false
+ dns_support: false
+ appliance_mode_support: true
# Delete the transit gateway
- community.aws.ec2_transit_gateway_vpc_attachment:
state: absent
id: 'tgw-attach-0c0c5fd0b0f01d1c9'
-'''
+"""
-RETURN = '''
+RETURN = r"""
transit_gateway_attachments:
description: The attributes of the Transit Gateway attachments.
type: list
@@ -216,34 +214,31 @@ transit_gateway_attachments:
type: str
returned: success
example: '123456789012'
-'''
-
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+"""
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager
def main():
-
argument_spec = dict(
- state=dict(type='str', required=False, default='present', choices=['absent', 'present']),
- transit_gateway=dict(type='str', required=False, aliases=['transit_gateway_id']),
- id=dict(type='str', required=False, aliases=['attachment_id']),
- name=dict(type='str', required=False),
- subnets=dict(type='list', elements='str', required=False),
- purge_subnets=dict(type='bool', required=False, default=True),
- tags=dict(type='dict', required=False, aliases=['resource_tags']),
- purge_tags=dict(type='bool', required=False, default=True),
- appliance_mode_support=dict(type='bool', required=False),
- dns_support=dict(type='bool', required=False),
- ipv6_support=dict(type='bool', required=False),
- wait=dict(type='bool', required=False, default=True),
- wait_timeout=dict(type='int', required=False),
+ state=dict(type="str", required=False, default="present", choices=["absent", "present"]),
+ transit_gateway=dict(type="str", required=False, aliases=["transit_gateway_id"]),
+ id=dict(type="str", required=False, aliases=["attachment_id"]),
+ name=dict(type="str", required=False),
+ subnets=dict(type="list", elements="str", required=False),
+ purge_subnets=dict(type="bool", required=False, default=True),
+ tags=dict(type="dict", required=False, aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", required=False, default=True),
+ appliance_mode_support=dict(type="bool", required=False),
+ dns_support=dict(type="bool", required=False),
+ ipv6_support=dict(type="bool", required=False),
+ wait=dict(type="bool", required=False, default=True),
+ wait_timeout=dict(type="int", required=False),
)
one_of = [
- ['id', 'transit_gateway', 'name'],
+ ["id", "transit_gateway", "name"],
]
module = AnsibleAWSModule(
@@ -252,55 +247,68 @@ def main():
required_one_of=one_of,
)
- attach_id = module.params.get('id', None)
- tgw = module.params.get('transit_gateway', None)
- name = module.params.get('name', None)
- tags = module.params.get('tags', None)
- purge_tags = module.params.get('purge_tags')
- state = module.params.get('state')
- subnets = module.params.get('subnets', None)
- purge_subnets = module.params.get('purge_subnets')
+ attach_id = module.params.get("id", None)
+ tgw = module.params.get("transit_gateway", None)
+ name = module.params.get("name", None)
+ tags = module.params.get("tags", None)
+ purge_tags = module.params.get("purge_tags")
+ state = module.params.get("state")
+ subnets = module.params.get("subnets", None)
+ purge_subnets = module.params.get("purge_subnets")
# When not provided with an ID see if one exists.
if not attach_id:
search_manager = TransitGatewayVpcAttachmentManager(module=module)
filters = dict()
if tgw:
- filters['transit-gateway-id'] = tgw
+ filters["transit-gateway-id"] = tgw
if name:
- filters['tag:Name'] = name
+ filters["tag:Name"] = name
if subnets:
vpc_id = search_manager.subnets_to_vpc(subnets)
- filters['vpc-id'] = vpc_id
+ filters["vpc-id"] = vpc_id
        # Attachments lurk in a 'deleted' state for a while; ignore them so we
# can reuse the names
- filters['state'] = [
- 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying',
- 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting'
+ filters["state"] = [
+ "available",
+ "deleting",
+ "failed",
+ "failing",
+ "initiatingRequest",
+ "modifying",
+ "pendingAcceptance",
+ "pending",
+ "rollingBack",
+ "rejected",
+ "rejecting",
]
attachments = search_manager.list(filters=filters)
if len(attachments) > 1:
- module.fail_json('Multiple matching attachments found, provide an ID', attachments=attachments)
+ module.fail_json("Multiple matching attachments found, provide an ID", attachments=attachments)
# If we find a match then we'll modify it by ID, otherwise we'll be
# creating a new RTB.
if attachments:
- attach_id = attachments[0]['transit_gateway_attachment_id']
+ attach_id = attachments[0]["transit_gateway_attachment_id"]
manager = TransitGatewayVpcAttachmentManager(module=module, id=attach_id)
- manager.set_wait(module.params.get('wait', None))
- manager.set_wait_timeout(module.params.get('wait_timeout', None))
+ manager.set_wait(module.params.get("wait", None))
+ manager.set_wait_timeout(module.params.get("wait_timeout", None))
- if state == 'absent':
+ if state == "absent":
manager.delete()
else:
if not attach_id:
if not tgw:
- module.fail_json('No existing attachment found. To create a new attachment'
- ' the `transit_gateway` parameter must be provided.')
+ module.fail_json(
+ "No existing attachment found. To create a new attachment"
+ " the `transit_gateway` parameter must be provided."
+ )
if not subnets:
- module.fail_json('No existing attachment found. To create a new attachment'
- ' the `subnets` parameter must be provided.')
+ module.fail_json(
+ "No existing attachment found. To create a new attachment"
+ " the `subnets` parameter must be provided."
+ )
# name is just a special case of tags.
if name:
@@ -314,9 +322,9 @@ def main():
manager.set_transit_gateway(tgw)
manager.set_subnets(subnets, purge_subnets)
manager.set_tags(tags, purge_tags)
- manager.set_dns_support(module.params.get('dns_support', None))
- manager.set_ipv6_support(module.params.get('ipv6_support', None))
- manager.set_appliance_mode_support(module.params.get('appliance_mode_support', None))
+ manager.set_dns_support(module.params.get("dns_support", None))
+ manager.set_ipv6_support(module.params.get("ipv6_support", None))
+ manager.set_appliance_mode_support(module.params.get("appliance_mode_support", None))
manager.flush_changes()
results = dict(
@@ -324,7 +332,7 @@ def main():
attachments=[manager.updated_resource],
)
if manager.changed:
- results['diff'] = dict(
+ results["diff"] = dict(
before=manager.original_resource,
after=manager.updated_resource,
)
@@ -332,5 +340,5 @@ def main():
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
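
main() above assembles an Ansible-style filter dict (transit-gateway-id, tag:Name, vpc-id, state) and hands it to the attachment manager, which presumably converts it with a helper equivalent to the ansible_dict_to_boto3_filter_list() seen elsewhere in this patch. A minimal sketch of that conversion, under that assumption:

def dict_to_boto3_filters(filters):
    # boto3 expects [{"Name": ..., "Values": [...]}]; scalar values are
    # wrapped in a single-element list.
    return [
        {"Name": name, "Values": values if isinstance(values, list) else [values]}
        for name, values in filters.items()
    ]

print(dict_to_boto3_filters({
    "transit-gateway-id": "tgw-123456789abcdef01",
    "state": ["available", "pending"],
}))
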
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py
index b76b0b0f7..a665e4080 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_transit_gateway_vpc_attachment_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: ec2_transit_gateway_vpc_attachment_info
short_description: describes AWS Transit Gateway VPC attachments
version_added: 4.0.0
@@ -39,14 +37,15 @@ options:
type: bool
required: false
default: false
-author: "Mark Chappell (@tremble)"
+author:
+ - "Mark Chappell (@tremble)"
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Describe a specific Transit Gateway attachment.
- community.aws.ec2_transit_gateway_vpc_attachment_info:
id: 'tgw-attach-0123456789abcdef0'
@@ -60,9 +59,9 @@ EXAMPLES = '''
- community.aws.ec2_transit_gateway_vpc_attachment_info:
filters:
      transit-gateway-id: tgw-0fedcba9876543210
-'''
+"""
-RETURN = '''
+RETURN = r"""
transit_gateway_attachments:
description: The attributes of the Transit Gateway attachments.
type: list
@@ -141,26 +140,23 @@ transit_gateway_attachments:
type: str
returned: success
example: '123456789012'
-'''
-
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+"""
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.transitgateway import TransitGatewayVpcAttachmentManager
def main():
-
argument_spec = dict(
- id=dict(type='str', required=False, aliases=['attachment_id']),
- name=dict(type='str', required=False),
- filters=dict(type='dict', required=False),
- include_deleted=dict(type='bool', required=False, default=False)
+ id=dict(type="str", required=False, aliases=["attachment_id"]),
+ name=dict(type="str", required=False),
+ filters=dict(type="dict", required=False),
+ include_deleted=dict(type="bool", required=False, default=False),
)
mutually_exclusive = [
- ['id', 'name'],
- ['id', 'filters'],
+ ["id", "name"],
+ ["id", "filters"],
]
module = AnsibleAWSModule(
@@ -168,22 +164,31 @@ def main():
supports_check_mode=True,
)
- name = module.params.get('name', None)
- id = module.params.get('id', None)
- opt_filters = module.params.get('filters', None)
+ name = module.params.get("name", None)
+ id = module.params.get("id", None)
+ opt_filters = module.params.get("filters", None)
search_manager = TransitGatewayVpcAttachmentManager(module=module)
filters = dict()
if name:
- filters['tag:Name'] = name
+ filters["tag:Name"] = name
- if not module.params.get('include_deleted'):
+ if not module.params.get("include_deleted"):
        # Attachments lurk in a 'deleted' state for a while; ignore them so we
# can reuse the names
- filters['state'] = [
- 'available', 'deleting', 'failed', 'failing', 'initiatingRequest', 'modifying',
- 'pendingAcceptance', 'pending', 'rollingBack', 'rejected', 'rejecting'
+ filters["state"] = [
+ "available",
+ "deleting",
+ "failed",
+ "failing",
+ "initiatingRequest",
+ "modifying",
+ "pendingAcceptance",
+ "pending",
+ "rollingBack",
+ "rejected",
+ "rejecting",
]
if opt_filters:
@@ -194,5 +199,5 @@ def main():
module.exit_json(changed=False, attachments=attachments, filters=filters)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
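
Both attachment modules above exclude attachments in lingering 'deleted' states by default so that names can be reused. A small sketch of that default filtering; the helper name and structure are assumed for illustration:

ACTIVE_STATES = [
    "available", "deleting", "failed", "failing", "initiatingRequest",
    "modifying", "pendingAcceptance", "pending", "rollingBack",
    "rejected", "rejecting",
]

def attachment_filters(name=None, include_deleted=False):
    # Mirrors the modules' behaviour: filter by tag:Name when given, and
    # restrict to non-deleted states unless include_deleted is set.
    filters = {}
    if name:
        filters["tag:Name"] = name
    if not include_deleted:
        filters["state"] = ACTIVE_STATES
    return filters

print(attachment_filters(name="AnsibleTest-1"))
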
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py
index dbcf15b12..1bd65f501 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_egress_igw.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ec2_vpc_egress_igw
version_added: 1.0.0
short_description: Manage an AWS VPC Egress Only Internet gateway
description:
- - Manage an AWS VPC Egress Only Internet gateway
-author: Daniel Shepherd (@shepdelacreme)
+ - Manage an AWS VPC Egress Only Internet gateway
+author:
+ - Daniel Shepherd (@shepdelacreme)
options:
vpc_id:
description:
@@ -27,13 +26,12 @@ options:
choices: [ 'present', 'absent' ]
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Ensure that the VPC has an Egress-Only Internet Gateway.
@@ -42,10 +40,9 @@ EXAMPLES = '''
vpc_id: vpc-abcdefgh
state: present
register: eigw
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
gateway_id:
description: The ID of the Egress Only Internet Gateway or Null.
returned: always
@@ -56,7 +53,7 @@ vpc_id:
returned: always
type: str
sample: vpc-012345678
-'''
+"""
try:
import botocore
@@ -65,9 +62,10 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def delete_eigw(module, connection, eigw_id):
@@ -82,16 +80,18 @@ def delete_eigw(module, connection, eigw_id):
try:
response = connection.delete_egress_only_internet_gateway(
- aws_retry=True,
- DryRun=module.check_mode,
- EgressOnlyInternetGatewayId=eigw_id)
- except is_boto3_error_code('DryRunOperation'):
+ aws_retry=True, DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id
+ )
+ except is_boto3_error_code("DryRunOperation"):
changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id))
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Could not delete Egress-Only Internet Gateway {eigw_id} from VPC {module.vpc_id}")
if not module.check_mode:
- changed = response.get('ReturnCode', False)
+ changed = response.get("ReturnCode", False)
return changed
@@ -109,29 +109,33 @@ def create_eigw(module, connection, vpc_id):
try:
response = connection.create_egress_only_internet_gateway(
- aws_retry=True,
- DryRun=module.check_mode,
- VpcId=vpc_id)
- except is_boto3_error_code('DryRunOperation'):
+ aws_retry=True, DryRun=module.check_mode, VpcId=vpc_id
+ )
+ except is_boto3_error_code("DryRunOperation"):
# When boto3 method is run with DryRun=True it returns an error on success
# We need to catch the error and return something valid
changed = True
- except is_boto3_error_code('InvalidVpcID.NotFound') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id))
+ except is_boto3_error_code("InvalidVpcID.NotFound") as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"invalid vpc ID '{vpc_id}' provided")
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Could not create Egress-Only Internet Gateway for vpc ID {vpc_id}")
if not module.check_mode:
- gateway = response.get('EgressOnlyInternetGateway', {})
- state = gateway.get('Attachments', [{}])[0].get('State')
- gateway_id = gateway.get('EgressOnlyInternetGatewayId')
+ gateway = response.get("EgressOnlyInternetGateway", {})
+ state = gateway.get("Attachments", [{}])[0].get("State")
+ gateway_id = gateway.get("EgressOnlyInternetGatewayId")
- if gateway_id and state in ('attached', 'attaching'):
+ if gateway_id and state in ("attached", "attaching"):
changed = True
else:
# EIGW gave back a bad attachment state or an invalid response so we error out
- module.fail_json(msg='Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response'.format(vpc_id),
- **camel_dict_to_snake_dict(response))
+ module.fail_json(
+ msg=f"Unable to create and attach Egress Only Internet Gateway to VPCId: {vpc_id}. Bad or no state in response",
+ **camel_dict_to_snake_dict(response),
+ )
return changed, gateway_id
@@ -147,45 +151,41 @@ def describe_eigws(module, connection, vpc_id):
gateway_id = None
try:
- response = connection.describe_egress_only_internet_gateways(
- aws_retry=True)
+ response = connection.describe_egress_only_internet_gateways(aws_retry=True)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways")
- for eigw in response.get('EgressOnlyInternetGateways', []):
- for attachment in eigw.get('Attachments', []):
- if attachment.get('VpcId') == vpc_id and attachment.get('State') in ('attached', 'attaching'):
- gateway_id = eigw.get('EgressOnlyInternetGatewayId')
+ for eigw in response.get("EgressOnlyInternetGateways", []):
+ for attachment in eigw.get("Attachments", []):
+ if attachment.get("VpcId") == vpc_id and attachment.get("State") in ("attached", "attaching"):
+ gateway_id = eigw.get("EgressOnlyInternetGatewayId")
return gateway_id
def main():
- argument_spec = dict(
- vpc_id=dict(required=True),
- state=dict(default='present', choices=['present', 'absent'])
- )
+ argument_spec = dict(vpc_id=dict(required=True), state=dict(default="present", choices=["present", "absent"]))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
retry_decorator = AWSRetry.jittered_backoff(retries=10)
- connection = module.client('ec2', retry_decorator=retry_decorator)
+ connection = module.client("ec2", retry_decorator=retry_decorator)
- vpc_id = module.params.get('vpc_id')
- state = module.params.get('state')
+ vpc_id = module.params.get("vpc_id")
+ state = module.params.get("state")
eigw_id = describe_eigws(module, connection, vpc_id)
result = dict(gateway_id=eigw_id, vpc_id=vpc_id)
changed = False
- if state == 'present' and not eigw_id:
- changed, result['gateway_id'] = create_eigw(module, connection, vpc_id)
- elif state == 'absent' and eigw_id:
+ if state == "present" and not eigw_id:
+ changed, result["gateway_id"] = create_eigw(module, connection, vpc_id)
+ elif state == "absent" and eigw_id:
changed = delete_eigw(module, connection, eigw_id)
module.exit_json(changed=changed, **result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
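
ec2_vpc_egress_igw above implements check mode through EC2's DryRun flag: with DryRun=True a call that would have succeeded raises a DryRunOperation error, which the module treats as "changed". A standalone boto3 sketch of that pattern; the region and VPC ID are placeholders:

import boto3
from botocore.exceptions import ClientError

def create_eigw(vpc_id, check_mode=False, region="us-east-1"):
    ec2 = boto3.client("ec2", region_name=region)
    try:
        response = ec2.create_egress_only_internet_gateway(
            DryRun=check_mode, VpcId=vpc_id
        )
    except ClientError as e:
        if e.response["Error"]["Code"] == "DryRunOperation":
            # The call would have succeeded; in check mode that means "changed".
            return True, None
        raise
    gateway = response["EgressOnlyInternetGateway"]
    return True, gateway["EgressOnlyInternetGatewayId"]
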
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py
index e11df3de5..cf109de1c 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: ec2_vpc_nacl
short_description: create and delete Network ACLs
version_added: 1.0.0
@@ -73,18 +71,18 @@ options:
type: str
choices: ['present', 'absent']
default: present
-author: Mike Mochan (@mmochan)
+author:
+ - Mike Mochan (@mmochan)
+notes:
+ - Support for I(purge_tags) was added in release 4.0.0.
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
- amazon.aws.tags
-notes:
- - Support for I(purge_tags) was added in release 4.0.0.
-'''
-
-EXAMPLES = r'''
+"""
+EXAMPLES = r"""
# Complete example to create and delete a network ACL
# that allows SSH, HTTP and ICMP in, and all traffic out.
- name: "Create and associate production DMZ network ACL with DMZ subnets"
@@ -98,16 +96,16 @@ EXAMPLES = r'''
Project: phoenix
Description: production DMZ
ingress:
- # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
- # port from, port to
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [205, 'tcp', 'allow', '::/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8]
+ # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
+ # port from, port to
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [205, 'tcp', 'allow', '::/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8]
egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- - [105, 'all', 'allow', '::/0', null, null, null, null]
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ - [105, 'all', 'allow', '::/0', null, null, null, null]
state: 'present'
- name: "Remove the ingress and egress rules - defaults to deny all"
@@ -141,8 +139,9 @@ EXAMPLES = r'''
community.aws.ec2_vpc_nacl:
nacl_id: acl-33b4ee5b
state: absent
-'''
-RETURN = r'''
+"""
+
+RETURN = r"""
task:
description: The result of the create, or delete action.
returned: success
@@ -152,47 +151,48 @@ nacl_id:
returned: success
type: str
sample: acl-123456789abcdef01
-'''
+"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
-PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, 'ipv6-icmp': 58}
+PROTOCOL_NUMBERS = {"all": -1, "icmp": 1, "tcp": 6, "udp": 17, "ipv6-icmp": 58}
# Utility methods
def icmp_present(entry):
- if len(entry) == 6 and entry[1] in ['icmp', 'ipv6-icmp'] or entry[1] in [1, 58]:
+ if len(entry) == 6 and entry[1] in ["icmp", "ipv6-icmp"] or entry[1] in [1, 58]:
return True
def subnets_removed(nacl_id, subnets, client, module):
results = find_acl_by_id(nacl_id, client, module)
- associations = results['NetworkAcls'][0]['Associations']
- subnet_ids = [assoc['SubnetId'] for assoc in associations]
+ associations = results["NetworkAcls"][0]["Associations"]
+ subnet_ids = [assoc["SubnetId"] for assoc in associations]
return [subnet for subnet in subnet_ids if subnet not in subnets]
def subnets_added(nacl_id, subnets, client, module):
results = find_acl_by_id(nacl_id, client, module)
- associations = results['NetworkAcls'][0]['Associations']
- subnet_ids = [assoc['SubnetId'] for assoc in associations]
+ associations = results["NetworkAcls"][0]["Associations"]
+ subnet_ids = [assoc["SubnetId"] for assoc in associations]
return [subnet for subnet in subnets if subnet not in subnet_ids]
def subnets_changed(nacl, client, module):
changed = False
- vpc_id = module.params.get('vpc_id')
- nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ vpc_id = module.params.get("vpc_id")
+ nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"]
subnets = subnets_to_associate(nacl, client, module)
if not subnets:
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
@@ -218,40 +218,41 @@ def subnets_changed(nacl, client, module):
def nacls_changed(nacl, client, module):
changed = False
params = dict()
- params['egress'] = module.params.get('egress')
- params['ingress'] = module.params.get('ingress')
+ params["egress"] = module.params.get("egress")
+ params["ingress"] = module.params.get("ingress")
- nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"]
nacl = describe_network_acl(client, module)
- entries = nacl['NetworkAcls'][0]['Entries']
- egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767]
- ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767]
- if rules_changed(egress, params['egress'], True, nacl_id, client, module):
+ entries = nacl["NetworkAcls"][0]["Entries"]
+ egress = [rule for rule in entries if rule["Egress"] is True and rule["RuleNumber"] < 32767]
+ ingress = [rule for rule in entries if rule["Egress"] is False and rule["RuleNumber"] < 32767]
+ if rules_changed(egress, params["egress"], True, nacl_id, client, module):
changed = True
- if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
+ if rules_changed(ingress, params["ingress"], False, nacl_id, client, module):
changed = True
return changed
def tags_changed(nacl_id, client, module):
- tags = module.params.get('tags')
- name = module.params.get('name')
- purge_tags = module.params.get('purge_tags')
+ tags = module.params.get("tags")
+ name = module.params.get("name")
+ purge_tags = module.params.get("purge_tags")
if name is None and tags is None:
return False
- if module.params.get('tags') is None:
+ if module.params.get("tags") is None:
# Only purge tags if tags is explicitly set to {} and purge_tags is True
purge_tags = False
new_tags = dict()
- if module.params.get('name') is not None:
- new_tags['Name'] = module.params.get('name')
- new_tags.update(module.params.get('tags') or {})
+ if module.params.get("name") is not None:
+ new_tags["Name"] = module.params.get("name")
+ new_tags.update(module.params.get("tags") or {})
- return ensure_ec2_tags(client, module, nacl_id, tags=new_tags,
- purge_tags=purge_tags, retry_codes=['InvalidNetworkAclID.NotFound'])
+ return ensure_ec2_tags(
+ client, module, nacl_id, tags=new_tags, purge_tags=purge_tags, retry_codes=["InvalidNetworkAclID.NotFound"]
+ )
def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
@@ -266,60 +267,60 @@ def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
if removed_rules:
params = dict()
for rule in removed_rules:
- params['NetworkAclId'] = nacl_id
- params['RuleNumber'] = rule['RuleNumber']
- params['Egress'] = Egress
+ params["NetworkAclId"] = nacl_id
+ params["RuleNumber"] = rule["RuleNumber"]
+ params["Egress"] = Egress
delete_network_acl_entry(params, client, module)
changed = True
added_rules = [x for x in rules if x not in aws_rules]
if added_rules:
for rule in added_rules:
- rule['NetworkAclId'] = nacl_id
+ rule["NetworkAclId"] = nacl_id
create_network_acl_entry(rule, client, module)
changed = True
return changed
def is_ipv6(cidr):
- return ':' in cidr
+ return ":" in cidr
def process_rule_entry(entry, Egress):
params = dict()
- params['RuleNumber'] = entry[0]
- params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]])
- params['RuleAction'] = entry[2]
- params['Egress'] = Egress
+ params["RuleNumber"] = entry[0]
+ params["Protocol"] = str(PROTOCOL_NUMBERS[entry[1]])
+ params["RuleAction"] = entry[2]
+ params["Egress"] = Egress
if is_ipv6(entry[3]):
- params['Ipv6CidrBlock'] = entry[3]
+ params["Ipv6CidrBlock"] = entry[3]
else:
- params['CidrBlock'] = entry[3]
+ params["CidrBlock"] = entry[3]
if icmp_present(entry):
- params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
+ params["IcmpTypeCode"] = {"Type": int(entry[4]), "Code": int(entry[5])}
else:
if entry[6] or entry[7]:
- params['PortRange'] = {"From": entry[6], 'To': entry[7]}
+ params["PortRange"] = {"From": entry[6], "To": entry[7]}
return params
def restore_default_associations(assoc_ids, default_nacl_id, client, module):
if assoc_ids:
params = dict()
- params['NetworkAclId'] = default_nacl_id[0]
+ params["NetworkAclId"] = default_nacl_id[0]
for assoc_id in assoc_ids:
- params['AssociationId'] = assoc_id
+ params["AssociationId"] = assoc_id
restore_default_acl_association(params, client, module)
return True
def construct_acl_entries(nacl, client, module):
- for entry in module.params.get('ingress'):
+ for entry in module.params.get("ingress"):
params = process_rule_entry(entry, Egress=False)
- params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
+ params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"]
create_network_acl_entry(params, client, module)
- for rule in module.params.get('egress'):
+ for rule in module.params.get("egress"):
params = process_rule_entry(rule, Egress=True)
- params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
+ params["NetworkAclId"] = nacl["NetworkAcl"]["NetworkAclId"]
create_network_acl_entry(params, client, module)
@@ -327,21 +328,21 @@ def construct_acl_entries(nacl, client, module):
def setup_network_acl(client, module):
changed = False
nacl = describe_network_acl(client, module)
- if not nacl['NetworkAcls']:
+ if not nacl["NetworkAcls"]:
tags = {}
- if module.params.get('name'):
- tags['Name'] = module.params.get('name')
- tags.update(module.params.get('tags') or {})
- nacl = create_network_acl(module.params.get('vpc_id'), client, module, tags)
- nacl_id = nacl['NetworkAcl']['NetworkAclId']
+ if module.params.get("name"):
+ tags["Name"] = module.params.get("name")
+ tags.update(module.params.get("tags") or {})
+ nacl = create_network_acl(module.params.get("vpc_id"), client, module, tags)
+ nacl_id = nacl["NetworkAcl"]["NetworkAclId"]
subnets = subnets_to_associate(nacl, client, module)
replace_network_acl_association(nacl_id, subnets, client, module)
construct_acl_entries(nacl, client, module)
changed = True
- return changed, nacl['NetworkAcl']['NetworkAclId']
+ return changed, nacl["NetworkAcl"]["NetworkAclId"]
else:
changed = False
- nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+ nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"]
changed |= subnets_changed(nacl, client, module)
changed |= nacls_changed(nacl, client, module)
changed |= tags_changed(nacl_id, client, module)
@@ -352,11 +353,11 @@ def remove_network_acl(client, module):
changed = False
result = dict()
nacl = describe_network_acl(client, module)
- if nacl['NetworkAcls']:
- nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
- vpc_id = nacl['NetworkAcls'][0]['VpcId']
- associations = nacl['NetworkAcls'][0]['Associations']
- assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
+ if nacl["NetworkAcls"]:
+ nacl_id = nacl["NetworkAcls"][0]["NetworkAclId"]
+ vpc_id = nacl["NetworkAcls"][0]["VpcId"]
+ associations = nacl["NetworkAcls"][0]["Associations"]
+ assoc_ids = [a["NetworkAclAssociationId"] for a in associations]
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
if not default_nacl_id:
result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
@@ -383,7 +384,7 @@ def _create_network_acl(client, *args, **kwargs):
def create_network_acl(vpc_id, client, module, tags):
params = dict(VpcId=vpc_id)
if tags:
- params['TagSpecifications'] = boto3_tag_specifications(tags, ['network-acl'])
+ params["TagSpecifications"] = boto3_tag_specifications(tags, ["network-acl"])
try:
if module.check_mode:
nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000"))
@@ -394,7 +395,7 @@ def create_network_acl(vpc_id, client, module, tags):
return nacl
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"])
def _create_network_acl_entry(client, *args, **kwargs):
return client.create_network_acl_entry(*args, **kwargs)
@@ -420,7 +421,7 @@ def delete_network_acl(nacl_id, client, module):
module.fail_json_aws(e)
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"])
def _delete_network_acl_entry(client, *args, **kwargs):
return client.delete_network_acl_entry(*args, **kwargs)
@@ -438,7 +439,7 @@ def _describe_network_acls(client, **kwargs):
return client.describe_network_acls(**kwargs)
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"])
def _describe_network_acls_retry_missing(client, **kwargs):
return client.describe_network_acls(**kwargs)
@@ -447,25 +448,23 @@ def describe_acl_associations(subnets, client, module):
if not subnets:
return []
try:
- results = _describe_network_acls_retry_missing(client, Filters=[
- {'Name': 'association.subnet-id', 'Values': subnets}
- ])
+ results = _describe_network_acls_retry_missing(
+ client, Filters=[{"Name": "association.subnet-id", "Values": subnets}]
+ )
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
- associations = results['NetworkAcls'][0]['Associations']
- return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets]
+ associations = results["NetworkAcls"][0]["Associations"]
+ return [a["NetworkAclAssociationId"] for a in associations if a["SubnetId"] in subnets]
def describe_network_acl(client, module):
try:
- if module.params.get('nacl_id'):
- nacl = _describe_network_acls(client, Filters=[
- {'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]}
- ])
+ if module.params.get("nacl_id"):
+ nacl = _describe_network_acls(
+ client, Filters=[{"Name": "network-acl-id", "Values": [module.params.get("nacl_id")]}]
+ )
else:
- nacl = _describe_network_acls(client, Filters=[
- {'Name': 'tag:Name', 'Values': [module.params.get('name')]}
- ])
+ nacl = _describe_network_acls(client, Filters=[{"Name": "tag:Name", "Values": [module.params.get("name")]}])
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
return nacl
@@ -480,38 +479,37 @@ def find_acl_by_id(nacl_id, client, module):
def find_default_vpc_nacl(vpc_id, client, module):
try:
- response = _describe_network_acls_retry_missing(client, Filters=[
- {'Name': 'vpc-id', 'Values': [vpc_id]}])
+ response = _describe_network_acls_retry_missing(client, Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
- nacls = response['NetworkAcls']
- return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True]
+ nacls = response["NetworkAcls"]
+ return [n["NetworkAclId"] for n in nacls if n["IsDefault"] is True]
def find_subnet_ids_by_nacl_id(nacl_id, client, module):
try:
- results = _describe_network_acls_retry_missing(client, Filters=[
- {'Name': 'association.network-acl-id', 'Values': [nacl_id]}
- ])
+ results = _describe_network_acls_retry_missing(
+ client, Filters=[{"Name": "association.network-acl-id", "Values": [nacl_id]}]
+ )
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
- if results['NetworkAcls']:
- associations = results['NetworkAcls'][0]['Associations']
- return [s['SubnetId'] for s in associations if s['SubnetId']]
+ if results["NetworkAcls"]:
+ associations = results["NetworkAcls"][0]["Associations"]
+ return [s["SubnetId"] for s in associations if s["SubnetId"]]
else:
return []
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"])
def _replace_network_acl_association(client, *args, **kwargs):
return client.replace_network_acl_association(*args, **kwargs)
def replace_network_acl_association(nacl_id, subnets, client, module):
params = dict()
- params['NetworkAclId'] = nacl_id
+ params["NetworkAclId"] = nacl_id
for association in describe_acl_associations(subnets, client, module):
- params['AssociationId'] = association
+ params["AssociationId"] = association
try:
if not module.check_mode:
_replace_network_acl_association(client, **params)
@@ -519,7 +517,7 @@ def replace_network_acl_association(nacl_id, subnets, client, module):
module.fail_json_aws(e)
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"])
def _replace_network_acl_entry(client, *args, **kwargs):
return client.replace_network_acl_entry(*args, **kwargs)
@@ -527,7 +525,7 @@ def _replace_network_acl_entry(client, *args, **kwargs):
def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
for entry in entries:
params = entry
- params['NetworkAclId'] = nacl_id
+ params["NetworkAclId"] = nacl_id
try:
if not module.check_mode:
_replace_network_acl_entry(client, **params)
@@ -535,7 +533,7 @@ def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
module.fail_json_aws(e)
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"])
def _replace_network_acl_association(client, *args, **kwargs):
return client.replace_network_acl_association(*args, **kwargs)
@@ -554,25 +552,23 @@ def _describe_subnets(client, *args, **kwargs):
def subnets_to_associate(nacl, client, module):
- params = list(module.params.get('subnets'))
+ params = list(module.params.get("subnets"))
if not params:
return []
all_found = []
if any(x.startswith("subnet-") for x in params):
try:
- subnets = _describe_subnets(client, Filters=[
- {'Name': 'subnet-id', 'Values': params}])
- all_found.extend(subnets.get('Subnets', []))
+ subnets = _describe_subnets(client, Filters=[{"Name": "subnet-id", "Values": params}])
+ all_found.extend(subnets.get("Subnets", []))
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
if len(params) != len(all_found):
try:
- subnets = _describe_subnets(client, Filters=[
- {'Name': 'tag:Name', 'Values': params}])
- all_found.extend(subnets.get('Subnets', []))
+ subnets = _describe_subnets(client, Filters=[{"Name": "tag:Name", "Values": params}])
+ all_found.extend(subnets.get("Subnets", []))
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
- return list(set(s['SubnetId'] for s in all_found if s.get('SubnetId')))
+ return list(set(s["SubnetId"] for s in all_found if s.get("SubnetId")))
def main():
@@ -580,29 +576,31 @@ def main():
vpc_id=dict(),
name=dict(),
nacl_id=dict(),
- subnets=dict(required=False, type='list', default=list(), elements='str'),
- tags=dict(required=False, type='dict', aliases=['resource_tags']),
- purge_tags=dict(required=False, type='bool', default=True),
- ingress=dict(required=False, type='list', default=list(), elements='list'),
- egress=dict(required=False, type='list', default=list(), elements='list'),
- state=dict(default='present', choices=['present', 'absent']),
+ subnets=dict(required=False, type="list", default=list(), elements="str"),
+ tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(required=False, type="bool", default=True),
+ ingress=dict(required=False, type="list", default=list(), elements="list"),
+ egress=dict(required=False, type="list", default=list(), elements="list"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[["name", "nacl_id"]],
+ required_if=[["state", "present", ["vpc_id"]]],
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=[['name', 'nacl_id']],
- required_if=[['state', 'present', ['vpc_id']]])
- state = module.params.get('state').lower()
+ state = module.params.get("state").lower()
- client = module.client('ec2')
+ client = module.client("ec2")
invocations = {
"present": setup_network_acl,
- "absent": remove_network_acl
+ "absent": remove_network_acl,
}
(changed, results) = invocations[state](client, module)
module.exit_json(changed=changed, nacl_id=results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
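
The ec2_vpc_nacl hunks above repeatedly wrap thin private helpers in AWSRetry.jittered_backoff with catch_extra_error_codes=["InvalidNetworkAclID.NotFound"]. A minimal sketch of that pattern, using only the decorator and client call already shown in the diff; the helper name here is illustrative, not part of the module:

from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidNetworkAclID.NotFound"])
def _replace_entries(client, **kwargs):
    # Besides the usual throttling codes, also retry the NotFound error that
    # EC2 can return for a short window after a NACL is created (eventual
    # consistency), instead of failing the task on the first attempt.
    return client.replace_network_acl_entry(**kwargs)
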
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py
index b85c94236..d95508a89 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_nacl_info.py
@@ -1,18 +1,18 @@
#!/usr/bin/python
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+# -*- coding: utf-8 -*-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ec2_vpc_nacl_info
version_added: 1.0.0
short_description: Gather information about Network ACLs in an AWS VPC
description:
- - Gather information about Network ACLs in an AWS VPC
-author: "Brad Davidson (@brandond)"
+ - Gather information about Network ACLs in an AWS VPC
+author:
+ - "Brad Davidson (@brandond)"
options:
nacl_ids:
description:
@@ -34,12 +34,12 @@ notes:
- By default, the module will return all Network ACLs.
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all Network ACLs:
@@ -55,9 +55,9 @@ EXAMPLES = r'''
filters:
'default': 'true'
register: default_nacls
-'''
+"""
-RETURN = r'''
+RETURN = r"""
nacls:
description: Returns an array of complex objects as described below.
returned: success
@@ -100,7 +100,7 @@ nacls:
type: list
elements: list
sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]]
-'''
+"""
try:
import botocore
@@ -109,20 +109,19 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
-PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
+PROTOCOL_NAMES = {"-1": "all", "1": "icmp", "6": "tcp", "17": "udp"}
def list_ec2_vpc_nacls(connection, module):
-
nacl_ids = module.params.get("nacl_ids")
filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
@@ -131,86 +130,97 @@ def list_ec2_vpc_nacls(connection, module):
try:
nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters)
- except is_boto3_error_code('InvalidNetworkAclID.NotFound'):
- module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
+ except is_boto3_error_code("InvalidNetworkAclID.NotFound"):
+ module.fail_json(msg="Unable to describe ACL. NetworkAcl does not exist")
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Unable to describe network ACLs {nacl_ids}")
    # Turn the boto3 result into ansible_friendly_snaked_names
snaked_nacls = []
- for nacl in nacls['NetworkAcls']:
+ for nacl in nacls["NetworkAcls"]:
snaked_nacls.append(camel_dict_to_snake_dict(nacl))
    # Turn the boto3 result into an ansible-friendly tag dictionary
for nacl in snaked_nacls:
- if 'tags' in nacl:
- nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value')
- if 'entries' in nacl:
- nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
- if entry['rule_number'] < 32767 and entry['egress']]
- nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
- if entry['rule_number'] < 32767 and not entry['egress']]
- del nacl['entries']
- if 'associations' in nacl:
- nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
- del nacl['associations']
- if 'network_acl_id' in nacl:
- nacl['nacl_id'] = nacl['network_acl_id']
- del nacl['network_acl_id']
+ if "tags" in nacl:
+ nacl["tags"] = boto3_tag_list_to_ansible_dict(nacl["tags"], "key", "value")
+ if "entries" in nacl:
+ nacl["egress"] = [
+ nacl_entry_to_list(entry)
+ for entry in nacl["entries"]
+ if entry["rule_number"] < 32767 and entry["egress"]
+ ]
+ nacl["ingress"] = [
+ nacl_entry_to_list(entry)
+ for entry in nacl["entries"]
+ if entry["rule_number"] < 32767 and not entry["egress"]
+ ]
+ del nacl["entries"]
+ if "associations" in nacl:
+ nacl["subnets"] = [a["subnet_id"] for a in nacl["associations"]]
+ del nacl["associations"]
+ if "network_acl_id" in nacl:
+ nacl["nacl_id"] = nacl["network_acl_id"]
+ del nacl["network_acl_id"]
module.exit_json(nacls=snaked_nacls)
def nacl_entry_to_list(entry):
-
# entry list format
# [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to]
elist = []
- elist.append(entry['rule_number'])
+ elist.append(entry["rule_number"])
- if entry.get('protocol') in PROTOCOL_NAMES:
- elist.append(PROTOCOL_NAMES[entry['protocol']])
+ if entry.get("protocol") in PROTOCOL_NAMES:
+ elist.append(PROTOCOL_NAMES[entry["protocol"]])
else:
- elist.append(entry.get('protocol'))
+ elist.append(entry.get("protocol"))
- elist.append(entry['rule_action'])
+ elist.append(entry["rule_action"])
- if entry.get('cidr_block'):
- elist.append(entry['cidr_block'])
- elif entry.get('ipv6_cidr_block'):
- elist.append(entry['ipv6_cidr_block'])
+ if entry.get("cidr_block"):
+ elist.append(entry["cidr_block"])
+ elif entry.get("ipv6_cidr_block"):
+ elist.append(entry["ipv6_cidr_block"])
else:
elist.append(None)
elist = elist + [None, None, None, None]
- if entry['protocol'] in ('1', '58'):
- elist[4] = entry.get('icmp_type_code', {}).get('type')
- elist[5] = entry.get('icmp_type_code', {}).get('code')
+ if entry["protocol"] in ("1", "58"):
+ elist[4] = entry.get("icmp_type_code", {}).get("type")
+ elist[5] = entry.get("icmp_type_code", {}).get("code")
- if entry['protocol'] not in ('1', '6', '17', '58'):
+ if entry["protocol"] not in ("1", "6", "17", "58"):
elist[6] = 0
elist[7] = 65535
- elif 'port_range' in entry:
- elist[6] = entry['port_range']['from']
- elist[7] = entry['port_range']['to']
+ elif "port_range" in entry:
+ elist[6] = entry["port_range"]["from"]
+ elist[7] = entry["port_range"]["to"]
return elist
def main():
-
argument_spec = dict(
- nacl_ids=dict(default=[], type='list', aliases=['nacl_id'], elements='str'),
- filters=dict(default={}, type='dict'))
+ nacl_ids=dict(default=[], type="list", aliases=["nacl_id"], elements="str"),
+ filters=dict(default={}, type="dict"),
+ )
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
- connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
list_ec2_vpc_nacls(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
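
For reference, a worked example of the eight-column row format that nacl_entry_to_list() above builds: [rule number, protocol, action, CIDR, ICMP type, ICMP code, port from, port to]. The input dict is hypothetical but follows the snake_cased shape produced by camel_dict_to_snake_dict():

entry = {
    "rule_number": 100,
    "protocol": "6",            # TCP; mapped via PROTOCOL_NAMES to "tcp"
    "rule_action": "allow",
    "cidr_block": "10.0.0.0/16",
    "port_range": {"from": 22, "to": 22},
    "egress": False,
}
# nacl_entry_to_list(entry) returns:
#   [100, "tcp", "allow", "10.0.0.0/16", None, None, 22, 22]
# The two None columns stay empty because ICMP type/code only apply to
# protocols "1" (ICMP) and "58" (ICMPv6).
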
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py
index f23ffae19..2a731bf23 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peer.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: ec2_vpc_peer
short_description: create, delete, accept, and reject VPC peering connections between two VPCs.
version_added: 1.0.0
@@ -57,13 +55,13 @@ notes:
author:
- Mike Mochan (@mmochan)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Complete example to create and accept a local peering connection.
- name: Create local account VPC peering Connection
community.aws.ec2_vpc_peer:
@@ -211,9 +209,9 @@ EXAMPLES = '''
peering_id: "{{ vpc_peer.peering_id }}"
profile: bot03_profile_for_cross_account
state: reject
+"""
-'''
-RETURN = '''
+RETURN = r"""
peering_id:
description: The id of the VPC peering connection created/deleted.
returned: always
@@ -352,33 +350,33 @@ vpc_peering_connection:
returned: success
type: str
example: "pcx-0123456789abcdef0"
-'''
+"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def wait_for_state(client, module, state, pcx_id):
- waiter = client.get_waiter('vpc_peering_connection_exists')
+ waiter = client.get_waiter("vpc_peering_connection_exists")
peer_filter = {
- 'vpc-peering-connection-id': pcx_id,
- 'status-code': state,
+ "vpc-peering-connection-id": pcx_id,
+ "status-code": state,
}
try:
- waiter.wait(
- Filters=ansible_dict_to_boto3_filter_list(peer_filter)
- )
+ waiter.wait(Filters=ansible_dict_to_boto3_filter_list(peer_filter))
except botocore.exceptions.WaiterError as e:
module.fail_json_aws(e, "Failed to wait for state change")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
@@ -387,18 +385,18 @@ def wait_for_state(client, module, state, pcx_id):
def describe_peering_connections(params, client):
peer_filter = {
- 'requester-vpc-info.vpc-id': params['VpcId'],
- 'accepter-vpc-info.vpc-id': params['PeerVpcId'],
+ "requester-vpc-info.vpc-id": params["VpcId"],
+ "accepter-vpc-info.vpc-id": params["PeerVpcId"],
}
result = client.describe_vpc_peering_connections(
aws_retry=True,
Filters=ansible_dict_to_boto3_filter_list(peer_filter),
)
- if result['VpcPeeringConnections'] == []:
+ if result["VpcPeeringConnections"] == []:
# Try again with the VPC/Peer relationship reversed
peer_filter = {
- 'requester-vpc-info.vpc-id': params['PeerVpcId'],
- 'accepter-vpc-info.vpc-id': params['VpcId'],
+ "requester-vpc-info.vpc-id": params["PeerVpcId"],
+ "accepter-vpc-info.vpc-id": params["VpcId"],
}
result = client.describe_vpc_peering_connections(
aws_retry=True,
@@ -409,29 +407,32 @@ def describe_peering_connections(params, client):
def is_active(peering_conn):
- return peering_conn['Status']['Code'] == 'active'
+ return peering_conn["Status"]["Code"] == "active"
def is_pending(peering_conn):
- return peering_conn['Status']['Code'] == 'pending-acceptance'
+ return peering_conn["Status"]["Code"] == "pending-acceptance"
def create_peer_connection(client, module):
changed = False
params = dict()
- params['VpcId'] = module.params.get('vpc_id')
- params['PeerVpcId'] = module.params.get('peer_vpc_id')
- if module.params.get('peer_region'):
- params['PeerRegion'] = module.params.get('peer_region')
- if module.params.get('peer_owner_id'):
- params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
+ params["VpcId"] = module.params.get("vpc_id")
+ params["PeerVpcId"] = module.params.get("peer_vpc_id")
+ if module.params.get("peer_region"):
+ params["PeerRegion"] = module.params.get("peer_region")
+ if module.params.get("peer_owner_id"):
+ params["PeerOwnerId"] = str(module.params.get("peer_owner_id"))
peering_conns = describe_peering_connections(params, client)
- for peering_conn in peering_conns['VpcPeeringConnections']:
- pcx_id = peering_conn['VpcPeeringConnectionId']
- if ensure_ec2_tags(client, module, pcx_id,
- purge_tags=module.params.get('purge_tags'),
- tags=module.params.get('tags'),
- ):
+ for peering_conn in peering_conns["VpcPeeringConnections"]:
+ pcx_id = peering_conn["VpcPeeringConnectionId"]
+ if ensure_ec2_tags(
+ client,
+ module,
+ pcx_id,
+ purge_tags=module.params.get("purge_tags"),
+ tags=module.params.get("tags"),
+ ):
changed = True
if is_active(peering_conn):
return (changed, peering_conn)
@@ -439,54 +440,59 @@ def create_peer_connection(client, module):
return (changed, peering_conn)
try:
peering_conn = client.create_vpc_peering_connection(aws_retry=True, **params)
- pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
- if module.params.get('tags'):
+ pcx_id = peering_conn["VpcPeeringConnection"]["VpcPeeringConnectionId"]
+ if module.params.get("tags"):
# Once the minimum botocore version is bumped to > 1.17.24
# (hopefully community.aws 3.0.0) we can add the tags to the
# creation parameters
- add_ec2_tags(client, module, pcx_id, module.params.get('tags'),
- retry_codes=['InvalidVpcPeeringConnectionID.NotFound'])
- if module.params.get('wait'):
- wait_for_state(client, module, 'pending-acceptance', pcx_id)
+ add_ec2_tags(
+ client,
+ module,
+ pcx_id,
+ module.params.get("tags"),
+ retry_codes=["InvalidVpcPeeringConnectionID.NotFound"],
+ )
+ if module.params.get("wait"):
+ wait_for_state(client, module, "pending-acceptance", pcx_id)
changed = True
- return (changed, peering_conn['VpcPeeringConnection'])
+ return (changed, peering_conn["VpcPeeringConnection"])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def remove_peer_connection(client, module):
- pcx_id = module.params.get('peering_id')
+ pcx_id = module.params.get("peering_id")
if pcx_id:
peering_conn = get_peering_connection_by_id(pcx_id, client, module)
else:
params = dict()
- params['VpcId'] = module.params.get('vpc_id')
- params['PeerVpcId'] = module.params.get('peer_vpc_id')
- params['PeerRegion'] = module.params.get('peer_region')
- if module.params.get('peer_owner_id'):
- params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
- peering_conn = describe_peering_connections(params, client)['VpcPeeringConnections'][0]
+ params["VpcId"] = module.params.get("vpc_id")
+ params["PeerVpcId"] = module.params.get("peer_vpc_id")
+ params["PeerRegion"] = module.params.get("peer_region")
+ if module.params.get("peer_owner_id"):
+ params["PeerOwnerId"] = str(module.params.get("peer_owner_id"))
+ peering_conn = describe_peering_connections(params, client)["VpcPeeringConnections"][0]
if not peering_conn:
module.exit_json(changed=False)
else:
- pcx_id = pcx_id or peering_conn['VpcPeeringConnectionId']
+ pcx_id = pcx_id or peering_conn["VpcPeeringConnectionId"]
- if peering_conn['Status']['Code'] == 'deleted':
- module.exit_json(msg='Connection in deleted state.', changed=False, peering_id=pcx_id)
- if peering_conn['Status']['Code'] == 'rejected':
+ if peering_conn["Status"]["Code"] == "deleted":
+ module.exit_json(msg="Connection in deleted state.", changed=False, peering_id=pcx_id)
+ if peering_conn["Status"]["Code"] == "rejected":
module.exit_json(
- msg='Connection has been rejected. State cannot be changed and will be removed automatically by AWS',
+ msg="Connection has been rejected. State cannot be changed and will be removed automatically by AWS",
changed=False,
- peering_id=pcx_id
+ peering_id=pcx_id,
)
try:
params = dict()
- params['VpcPeeringConnectionId'] = pcx_id
+ params["VpcPeeringConnectionId"] = pcx_id
client.delete_vpc_peering_connection(aws_retry=True, **params)
- if module.params.get('wait'):
- wait_for_state(client, module, 'deleted', pcx_id)
+ if module.params.get("wait"):
+ wait_for_state(client, module, "deleted", pcx_id)
module.exit_json(changed=True, peering_id=pcx_id)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
@@ -494,44 +500,55 @@ def remove_peer_connection(client, module):
def get_peering_connection_by_id(peering_id, client, module):
params = dict()
- params['VpcPeeringConnectionIds'] = [peering_id]
+ params["VpcPeeringConnectionIds"] = [peering_id]
try:
vpc_peering_connection = client.describe_vpc_peering_connections(aws_retry=True, **params)
- return vpc_peering_connection['VpcPeeringConnections'][0]
- except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e:
- module.fail_json_aws(e, msg='Malformed connection ID')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Error while describing peering connection by peering_id')
+ return vpc_peering_connection["VpcPeeringConnections"][0]
+ except is_boto3_error_code("InvalidVpcPeeringConnectionId.Malformed") as e:
+ module.fail_json_aws(e, msg="Malformed connection ID")
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Error while describing peering connection by peering_id")
def accept_reject(state, client, module):
changed = False
params = dict()
- peering_id = module.params.get('peering_id')
- params['VpcPeeringConnectionId'] = peering_id
+ peering_id = module.params.get("peering_id")
+ params["VpcPeeringConnectionId"] = peering_id
vpc_peering_connection = get_peering_connection_by_id(peering_id, client, module)
- peering_status = vpc_peering_connection['Status']['Code']
+ peering_status = vpc_peering_connection["Status"]["Code"]
- if peering_status not in ['active', 'rejected']:
+ if peering_status not in ["active", "rejected"]:
try:
- if state == 'accept':
+ if state == "accept":
client.accept_vpc_peering_connection(aws_retry=True, **params)
- target_state = 'active'
+ target_state = "active"
else:
client.reject_vpc_peering_connection(aws_retry=True, **params)
- target_state = 'rejected'
- if module.params.get('tags'):
- add_ec2_tags(client, module, peering_id, module.params.get('tags'),
- retry_codes=['InvalidVpcPeeringConnectionID.NotFound'])
+ target_state = "rejected"
+ if module.params.get("tags"):
+ add_ec2_tags(
+ client,
+ module,
+ peering_id,
+ module.params.get("tags"),
+ retry_codes=["InvalidVpcPeeringConnectionID.NotFound"],
+ )
changed = True
- if module.params.get('wait'):
+ if module.params.get("wait"):
wait_for_state(client, module, target_state, peering_id)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
- if ensure_ec2_tags(client, module, peering_id,
- purge_tags=module.params.get('purge_tags'),
- tags=module.params.get('tags'),
- ):
+ if ensure_ec2_tags(
+ client,
+ module,
+ peering_id,
+ purge_tags=module.params.get("purge_tags"),
+ tags=module.params.get("tags"),
+ ):
changed = True
    # Reload peering connection info to return the latest state/params
@@ -546,34 +563,36 @@ def main():
peer_region=dict(),
peering_id=dict(),
peer_owner_id=dict(),
- tags=dict(required=False, type='dict', aliases=['resource_tags']),
- purge_tags=dict(default=True, type='bool'),
- state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']),
- wait=dict(default=False, type='bool'),
+ tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(default=True, type="bool"),
+ state=dict(default="present", choices=["present", "absent", "accept", "reject"]),
+ wait=dict(default=False, type="bool"),
)
required_if = [
- ('state', 'present', ['vpc_id', 'peer_vpc_id']),
- ('state', 'accept', ['peering_id']),
- ('state', 'reject', ['peering_id'])
+ ("state", "present", ["vpc_id", "peer_vpc_id"]),
+ ("state", "accept", ["peering_id"]),
+ ("state", "reject", ["peering_id"]),
]
module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if)
- state = module.params.get('state')
- peering_id = module.params.get('peering_id')
- vpc_id = module.params.get('vpc_id')
- peer_vpc_id = module.params.get('peer_vpc_id')
+ state = module.params.get("state")
+ peering_id = module.params.get("peering_id")
+ vpc_id = module.params.get("vpc_id")
+ peer_vpc_id = module.params.get("peer_vpc_id")
try:
- client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- if state == 'present':
+ if state == "present":
(changed, results) = create_peer_connection(client, module)
- elif state == 'absent':
+ elif state == "absent":
if not peering_id and (not vpc_id or not peer_vpc_id):
- module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]')
+ module.fail_json(
+ msg="state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]"
+ )
remove_peer_connection(client, module)
else:
@@ -581,10 +600,12 @@ def main():
formatted_results = camel_dict_to_snake_dict(results)
# Turn the resource tags from boto3 into an ansible friendly tag dictionary
- formatted_results['tags'] = boto3_tag_list_to_ansible_dict(formatted_results.get('tags', []))
+ formatted_results["tags"] = boto3_tag_list_to_ansible_dict(formatted_results.get("tags", []))
- module.exit_json(changed=changed, vpc_peering_connection=formatted_results, peering_id=results['VpcPeeringConnectionId'])
+ module.exit_json(
+ changed=changed, vpc_peering_connection=formatted_results, peering_id=results["VpcPeeringConnectionId"]
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
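
describe_peering_connections() above queries twice because either account may have initiated the peering: once with the given VPC as requester, and once with the requester/accepter roles swapped. A condensed sketch of that two-pass lookup, reusing the module_utils helper imported in the diff; find_peering() and its arguments are hypothetical names:

from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list

def find_peering(client, vpc_a, vpc_b):
    # Try both orientations of the requester/accepter filter pair.
    for requester, accepter in ((vpc_a, vpc_b), (vpc_b, vpc_a)):
        filters = ansible_dict_to_boto3_filter_list({
            "requester-vpc-info.vpc-id": requester,
            "accepter-vpc-info.vpc-id": accepter,
        })
        result = client.describe_vpc_peering_connections(Filters=filters)
        if result["VpcPeeringConnections"]:
            return result["VpcPeeringConnections"]
    return []
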
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py
index 680fa3b68..badc9f8fd 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_peering_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: ec2_vpc_peering_info
short_description: Retrieves AWS VPC Peering details using AWS methods.
version_added: 1.0.0
@@ -25,15 +23,15 @@ options:
for possible filters.
type: dict
default: {}
-author: Karen Cheng (@Etherdaemon)
+author:
+ - Karen Cheng (@Etherdaemon)
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Simple example of listing all VPC Peers
- name: List all vpc peers
community.aws.ec2_vpc_peering_info:
@@ -58,9 +56,9 @@ EXAMPLES = r'''
filters:
status-code: ['pending-acceptance']
register: pending_vpc_peers
-'''
+"""
-RETURN = r'''
+RETURN = r"""
vpc_peering_connections:
description: Details of the matching VPC peering connections.
returned: success
@@ -199,58 +197,62 @@ result:
description: The result of the describe.
returned: success
type: list
-'''
+"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_vpc_peers(client, module):
params = dict()
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- if module.params.get('peer_connection_ids'):
- params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids')
+ params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+ if module.params.get("peer_connection_ids"):
+ params["VpcPeeringConnectionIds"] = module.params.get("peer_connection_ids")
try:
result = client.describe_vpc_peering_connections(aws_retry=True, **params)
result = normalize_boto3_result(result)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe peering connections")
- return result['VpcPeeringConnections']
+ return result["VpcPeeringConnections"]
def main():
argument_spec = dict(
- filters=dict(default=dict(), type='dict'),
- peer_connection_ids=dict(default=None, type='list', elements='str'),
+ filters=dict(default=dict(), type="dict"),
+ peer_connection_ids=dict(default=None, type="list", elements="str"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,)
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
try:
- ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
    # Turn the boto3 result into ansible friendly_snaked_names
results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)]
    # Turn the boto3 result into an ansible-friendly tag dictionary
for peer in results:
- peer['tags'] = boto3_tag_list_to_ansible_dict(peer.get('tags', []))
+ peer["tags"] = boto3_tag_list_to_ansible_dict(peer.get("tags", []))
module.exit_json(result=results, vpc_peering_connections=results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py
index 8332e1006..135658f76 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: ec2_vpc_vgw
short_description: Create and delete AWS VPN Virtual Gateways
version_added: 1.0.0
@@ -55,13 +53,13 @@ notes:
author:
- Nick Aslanidis (@naslanidis)
extends_documentation_fragment:
- - amazon.aws.ec2
- - amazon.aws.aws
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new VGW attached to a specific VPC
community.aws.ec2_vpc_vgw:
state: present
@@ -100,9 +98,9 @@ EXAMPLES = '''
profile: personal
vpn_gateway_id: vgw-3a9aa123
register: deleted_vgw
-'''
+"""
-RETURN = '''
+RETURN = r"""
vgw:
description: A description of the VGW
returned: success
@@ -133,7 +131,7 @@ vgw:
type: str
returned: success
example: vpc-123456789abcdef01
-'''
+"""
import time
@@ -142,13 +140,14 @@ try:
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
-from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
-from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
# AWS uses VpnGatewayLimitExceeded for both 'Too many VGWs' and 'Too many concurrent changes'
@@ -156,11 +155,14 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_ta
class VGWRetry(AWSRetry):
@staticmethod
def status_code_from_exception(error):
- return (error.response['Error']['Code'], error.response['Error']['Message'],)
+ return (
+ error.response["Error"]["Code"],
+ error.response["Error"]["Message"],
+ )
@staticmethod
def found(response_code, catch_extra_error_codes=None):
- retry_on = ['The maximum number of mutating objects has been reached.']
+ retry_on = ["The maximum number of mutating objects has been reached."]
if catch_extra_error_codes:
retry_on.extend(catch_extra_error_codes)
@@ -180,37 +182,37 @@ def get_vgw_info(vgws):
for vgw in vgws:
vgw_info = {
- 'id': vgw['VpnGatewayId'],
- 'type': vgw['Type'],
- 'state': vgw['State'],
- 'vpc_id': None,
- 'tags': dict()
+ "id": vgw["VpnGatewayId"],
+ "type": vgw["Type"],
+ "state": vgw["State"],
+ "vpc_id": None,
+ "tags": dict(),
}
- if vgw['Tags']:
- vgw_info['tags'] = boto3_tag_list_to_ansible_dict(vgw['Tags'])
+ if vgw["Tags"]:
+ vgw_info["tags"] = boto3_tag_list_to_ansible_dict(vgw["Tags"])
- if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached':
- vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId']
+ if len(vgw["VpcAttachments"]) != 0 and vgw["VpcAttachments"][0]["State"] == "attached":
+ vgw_info["vpc_id"] = vgw["VpcAttachments"][0]["VpcId"]
return vgw_info
def wait_for_status(client, module, vpn_gateway_id, status):
polling_increment_secs = 15
- max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
+ max_retries = module.params.get("wait_timeout") // polling_increment_secs
status_achieved = False
for x in range(0, max_retries):
try:
response = find_vgw(client, module, vpn_gateway_id)
- if response[0]['VpcAttachments'][0]['State'] == status:
+ if response[0]["VpcAttachments"][0]["State"] == status:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failure while waiting for status update')
+ module.fail_json_aws(e, msg="Failure while waiting for status update")
result = response
return status_achieved, result
@@ -218,22 +220,21 @@ def wait_for_status(client, module, vpn_gateway_id, status):
def attach_vgw(client, module, vpn_gateway_id):
params = dict()
- params['VpcId'] = module.params.get('vpc_id')
+ params["VpcId"] = module.params.get("vpc_id")
try:
# Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State
# as available several seconds before actually permitting a new attachment.
# So we catch and retry that error. See https://github.com/ansible/ansible/issues/53185
- response = VGWRetry.jittered_backoff(retries=5,
- catch_extra_error_codes=['InvalidParameterValue']
- )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id,
- VpcId=params['VpcId'])
+ response = VGWRetry.jittered_backoff(retries=5, catch_extra_error_codes=["InvalidParameterValue"])(
+ client.attach_vpn_gateway
+ )(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to attach VPC')
+ module.fail_json_aws(e, msg="Failed to attach VPC")
- status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
+ status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "attached")
if not status_achieved:
- module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')
+ module.fail_json(msg="Error waiting for vpc to attach to vgw - please check the AWS console")
result = response
return result
@@ -241,19 +242,19 @@ def attach_vgw(client, module, vpn_gateway_id):
def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
params = dict()
- params['VpcId'] = module.params.get('vpc_id')
+ params["VpcId"] = module.params.get("vpc_id")
try:
if vpc_id:
response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id, aws_retry=True)
else:
- response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'], aws_retry=True)
+ response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params["VpcId"], aws_retry=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, 'Failed to detach gateway')
+ module.fail_json_aws(e, "Failed to detach gateway")
- status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
+ status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], "detached")
if not status_achieved:
- module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')
+ module.fail_json(msg="Error waiting for vpc to detach from vgw - please check the AWS console")
result = response
return result
@@ -261,37 +262,37 @@ def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
def create_vgw(client, module):
params = dict()
- params['Type'] = module.params.get('type')
- tags = module.params.get('tags') or {}
- tags['Name'] = module.params.get('name')
- params['TagSpecifications'] = boto3_tag_specifications(tags, ['vpn-gateway'])
- if module.params.get('asn'):
- params['AmazonSideAsn'] = module.params.get('asn')
+ params["Type"] = module.params.get("type")
+ tags = module.params.get("tags") or {}
+ tags["Name"] = module.params.get("name")
+ params["TagSpecifications"] = boto3_tag_specifications(tags, ["vpn-gateway"])
+ if module.params.get("asn"):
+ params["AmazonSideAsn"] = module.params.get("asn")
try:
response = client.create_vpn_gateway(aws_retry=True, **params)
- get_waiter(
- client, 'vpn_gateway_exists'
- ).wait(
- VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']]
- )
+ get_waiter(client, "vpn_gateway_exists").wait(VpnGatewayIds=[response["VpnGateway"]["VpnGatewayId"]])
except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId']))
- except is_boto3_error_code('VpnGatewayLimitExceeded') as e:
+ module.fail_json_aws(
+ e, msg=f"Failed to wait for Vpn Gateway {response['VpnGateway']['VpnGatewayId']} to be available"
+ )
+ except is_boto3_error_code("VpnGatewayLimitExceeded") as e:
module.fail_json_aws(e, msg="Too many VPN gateways exist in this account.")
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed to create gateway')
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to create gateway")
result = response
return result
def delete_vgw(client, module, vpn_gateway_id):
-
try:
response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id, aws_retry=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to delete gateway')
+ module.fail_json_aws(e, msg="Failed to delete gateway")
# return the deleted VpnGatewayId as this is not included in the above response
result = vpn_gateway_id
@@ -300,13 +301,13 @@ def delete_vgw(client, module, vpn_gateway_id):
def find_vpc(client, module):
params = dict()
- params['vpc_id'] = module.params.get('vpc_id')
+ params["vpc_id"] = module.params.get("vpc_id")
- if params['vpc_id']:
+ if params["vpc_id"]:
try:
- response = client.describe_vpcs(VpcIds=[params['vpc_id']], aws_retry=True)
+ response = client.describe_vpcs(VpcIds=[params["vpc_id"]], aws_retry=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to describe VPC')
+ module.fail_json_aws(e, msg="Failed to describe VPC")
result = response
return result
@@ -315,66 +316,68 @@ def find_vpc(client, module):
def find_vgw(client, module, vpn_gateway_id=None):
params = dict()
if vpn_gateway_id:
- params['VpnGatewayIds'] = vpn_gateway_id
+ params["VpnGatewayIds"] = vpn_gateway_id
else:
- params['Filters'] = [
- {'Name': 'type', 'Values': [module.params.get('type')]},
- {'Name': 'tag:Name', 'Values': [module.params.get('name')]},
+ params["Filters"] = [
+ {"Name": "type", "Values": [module.params.get("type")]},
+ {"Name": "tag:Name", "Values": [module.params.get("name")]},
]
- if module.params.get('state') == 'present':
- params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']})
+ if module.params.get("state") == "present":
+ params["Filters"].append({"Name": "state", "Values": ["pending", "available"]})
try:
response = client.describe_vpn_gateways(aws_retry=True, **params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to describe gateway using filters')
+ module.fail_json_aws(e, msg="Failed to describe gateway using filters")
- return sorted(response['VpnGateways'], key=lambda k: k['VpnGatewayId'])
+ return sorted(response["VpnGateways"], key=lambda k: k["VpnGatewayId"])
def ensure_vgw_present(client, module):
-
# If an existing vgw name and type matches our args, then a match is considered to have been
# found and we will not create another vgw.
changed = False
params = dict()
result = dict()
- params['Name'] = module.params.get('name')
- params['VpcId'] = module.params.get('vpc_id')
- params['Type'] = module.params.get('type')
- params['Tags'] = module.params.get('tags')
- params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
+ params["Name"] = module.params.get("name")
+ params["VpcId"] = module.params.get("vpc_id")
+ params["Type"] = module.params.get("type")
+ params["Tags"] = module.params.get("tags")
+ params["VpnGatewayIds"] = module.params.get("vpn_gateway_id")
# check that the vpc_id exists. If not, an exception is thrown
- if params['VpcId']:
+ if params["VpcId"]:
vpc = find_vpc(client, module)
# check if a gateway matching our module args already exists
existing_vgw = find_vgw(client, module)
if existing_vgw != []:
- vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
- desired_tags = module.params.get('tags')
- purge_tags = module.params.get('purge_tags')
+ vpn_gateway_id = existing_vgw[0]["VpnGatewayId"]
+ desired_tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
if desired_tags is None:
desired_tags = dict()
purge_tags = False
- tags = dict(Name=module.params.get('name'))
+ tags = dict(Name=module.params.get("name"))
tags.update(desired_tags)
- changed = ensure_ec2_tags(client, module, vpn_gateway_id, resource_type='vpn-gateway',
- tags=tags, purge_tags=purge_tags)
+ changed = ensure_ec2_tags(
+ client, module, vpn_gateway_id, resource_type="vpn-gateway", tags=tags, purge_tags=purge_tags
+ )
# if a vpc_id was provided, check if it exists and if it's attached
- if params['VpcId']:
-
- current_vpc_attachments = existing_vgw[0]['VpcAttachments']
-
- if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
- if current_vpc_attachments[0]['VpcId'] != params['VpcId'] or current_vpc_attachments[0]['State'] != 'attached':
+ if params["VpcId"]:
+ current_vpc_attachments = existing_vgw[0]["VpcAttachments"]
+
+ if current_vpc_attachments != [] and current_vpc_attachments[0]["State"] == "attached":
+ if (
+ current_vpc_attachments[0]["VpcId"] != params["VpcId"]
+ or current_vpc_attachments[0]["State"] != "attached"
+ ):
# detach the existing vpc from the virtual gateway
- vpc_to_detach = current_vpc_attachments[0]['VpcId']
+ vpc_to_detach = current_vpc_attachments[0]["VpcId"]
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
- get_waiter(client, 'vpn_gateway_detached').wait(VpnGatewayIds=[vpn_gateway_id])
+ get_waiter(client, "vpn_gateway_detached").wait(VpnGatewayIds=[vpn_gateway_id])
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
changed = True
else:
@@ -386,10 +389,10 @@ def ensure_vgw_present(client, module):
else:
existing_vgw = find_vgw(client, module, [vpn_gateway_id])
- if existing_vgw[0]['VpcAttachments'] != []:
- if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
+ if existing_vgw[0]["VpcAttachments"] != []:
+ if existing_vgw[0]["VpcAttachments"][0]["State"] == "attached":
# detach the vpc from the vgw
- vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"]
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
@@ -397,10 +400,10 @@ def ensure_vgw_present(client, module):
# create a new vgw
new_vgw = create_vgw(client, module)
changed = True
- vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']
+ vpn_gateway_id = new_vgw["VpnGateway"]["VpnGatewayId"]
# if a vpc-id was supplied, attempt to attach it to the vgw
- if params['VpcId']:
+ if params["VpcId"]:
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
changed = True
@@ -411,45 +414,46 @@ def ensure_vgw_present(client, module):
def ensure_vgw_absent(client, module):
-
# If an existing vgw name and type matches our args, then a match is considered to have been
# found and we will take steps to delete it.
changed = False
params = dict()
result = dict()
- params['Name'] = module.params.get('name')
- params['VpcId'] = module.params.get('vpc_id')
- params['Type'] = module.params.get('type')
- params['Tags'] = module.params.get('tags')
- params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
+ params["Name"] = module.params.get("name")
+ params["VpcId"] = module.params.get("vpc_id")
+ params["Type"] = module.params.get("type")
+ params["Tags"] = module.params.get("tags")
+ params["VpnGatewayIds"] = module.params.get("vpn_gateway_id")
# check if a gateway matching our module args already exists
- if params['VpnGatewayIds']:
- existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']])
- if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted':
+ if params["VpnGatewayIds"]:
+ existing_vgw_with_id = find_vgw(client, module, [params["VpnGatewayIds"]])
+ if existing_vgw_with_id != [] and existing_vgw_with_id[0]["State"] != "deleted":
existing_vgw = existing_vgw_with_id
- if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
- if params['VpcId']:
- if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
- module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
+ if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached":
+ if params["VpcId"]:
+ if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]:
+ module.fail_json(
+ msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console"
+ )
else:
# detach the vpc from the vgw
- detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
- deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ detach_vgw(client, module, params["VpnGatewayIds"], params["VpcId"])
+ deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"])
changed = True
else:
# attempt to detach any attached vpcs
- vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
- detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
- deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"]
+ detach_vgw(client, module, params["VpnGatewayIds"], vpc_to_detach)
+ deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"])
changed = True
else:
                # no VPCs are attached so attempt to delete the vgw
- deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
+ deleted_vgw = delete_vgw(client, module, params["VpnGatewayIds"])
changed = True
else:
@@ -458,20 +462,22 @@ def ensure_vgw_absent(client, module):
else:
# Check that a name and type argument has been supplied if no vgw-id
- if not module.params.get('name') or not module.params.get('type'):
- module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is supplied')
+ if not module.params.get("name") or not module.params.get("type"):
+ module.fail_json(msg="A name and type is required when no vgw-id and a status of 'absent' is supplied")
existing_vgw = find_vgw(client, module)
- if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
- vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
- if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
- if params['VpcId']:
- if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
- module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
+ if existing_vgw != [] and existing_vgw[0]["State"] != "deleted":
+ vpn_gateway_id = existing_vgw[0]["VpnGatewayId"]
+ if existing_vgw[0]["VpcAttachments"] != [] and existing_vgw[0]["VpcAttachments"][0]["State"] == "attached":
+ if params["VpcId"]:
+ if params["VpcId"] != existing_vgw[0]["VpcAttachments"][0]["VpcId"]:
+ module.fail_json(
+ msg="The vpc-id provided does not match the vpc-id currently attached - please check the AWS console"
+ )
else:
# detach the vpc from the vgw
- detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
+ detach_vgw(client, module, vpn_gateway_id, params["VpcId"])
# now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
@@ -479,7 +485,7 @@ def ensure_vgw_absent(client, module):
else:
# attempt to detach any attached vpcs
- vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
+ vpc_to_detach = existing_vgw[0]["VpcAttachments"][0]["VpcId"]
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
@@ -501,29 +507,28 @@ def ensure_vgw_absent(client, module):
def main():
argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
+ state=dict(default="present", choices=["present", "absent"]),
name=dict(),
vpn_gateway_id=dict(),
vpc_id=dict(),
- asn=dict(type='int'),
- wait_timeout=dict(type='int', default=320),
- type=dict(default='ipsec.1', choices=['ipsec.1']),
- tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
- purge_tags=dict(default=True, type='bool'),
+ asn=dict(type="int"),
+ wait_timeout=dict(type="int", default=320),
+ type=dict(default="ipsec.1", choices=["ipsec.1"]),
+ tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(default=True, type="bool"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ['name']]])
+ module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[["state", "present", ["name"]]])
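+ # required_if: a gateway name must be supplied for state=present; removal can instead key off vpn_gateway_id (or name+type).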
- state = module.params.get('state').lower()
+ state = module.params.get("state").lower()
- client = module.client('ec2', retry_decorator=VGWRetry.jittered_backoff(retries=10))
+ client = module.client("ec2", retry_decorator=VGWRetry.jittered_backoff(retries=10))
- if state == 'present':
+ if state == "present":
(changed, results) = ensure_vgw_present(client, module)
else:
(changed, results) = ensure_vgw_absent(client, module)
module.exit_json(changed=changed, vgw=results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py
index fcb520cf0..6ab311c03 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vgw_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ec2_vpc_vgw_info
version_added: 1.0.0
@@ -28,12 +26,12 @@ options:
author:
- "Nick Aslanidis (@naslanidis)"
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all virtual gateways for an account or profile
@@ -47,7 +45,7 @@ EXAMPLES = r'''
region: ap-southeast-2
profile: production
filters:
- "tag:Name": "main-virt-gateway"
+ "tag:Name": "main-virt-gateway"
register: vgw_info
- name: Gather information about a specific virtual gateway by VpnGatewayIds
@@ -56,9 +54,9 @@ EXAMPLES = r'''
profile: production
vpn_gateway_ids: vgw-c432f6a7
register: vgw_info
-'''
+"""
-RETURN = r'''
+RETURN = r"""
virtual_gateways:
description: The virtual gateways for the account.
returned: always
@@ -121,7 +119,7 @@ virtual_gateways:
type: dict
returned: success
example: {"MyKey": "MyValue"}
-'''
+"""
try:
import botocore
@@ -130,19 +128,20 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_virtual_gateway_info(virtual_gateway):
- tags = virtual_gateway.get('Tags', [])
+ tags = virtual_gateway.get("Tags", [])
resource_tags = boto3_tag_list_to_ansible_dict(tags)
virtual_gateway_info = dict(
- VpnGatewayId=virtual_gateway['VpnGatewayId'],
- State=virtual_gateway['State'],
- Type=virtual_gateway['Type'],
- VpcAttachments=virtual_gateway['VpcAttachments'],
+ VpnGatewayId=virtual_gateway["VpnGatewayId"],
+ State=virtual_gateway["State"],
+ Type=virtual_gateway["Type"],
+ VpcAttachments=virtual_gateway["VpcAttachments"],
Tags=tags,
ResourceTags=resource_tags,
)
@@ -152,32 +151,34 @@ def get_virtual_gateway_info(virtual_gateway):
def list_virtual_gateways(client, module):
params = dict()
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+ params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
if module.params.get("vpn_gateway_ids"):
- params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids")
+ params["VpnGatewayIds"] = module.params.get("vpn_gateway_ids")
try:
all_virtual_gateways = client.describe_vpn_gateways(**params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to list gateways")
- return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=['ResourceTags'])
- for vgw in all_virtual_gateways['VpnGateways']]
+ return [
+ camel_dict_to_snake_dict(get_virtual_gateway_info(vgw), ignore_list=["ResourceTags"])
+ for vgw in all_virtual_gateways["VpnGateways"]
+ ]
def main():
argument_spec = dict(
- filters=dict(type='dict', default=dict()),
- vpn_gateway_ids=dict(type='list', default=None, elements='str'),
+ filters=dict(type="dict", default=dict()),
+ vpn_gateway_ids=dict(type="list", default=None, elements="str"),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
try:
- connection = module.client('ec2')
+ connection = module.client("ec2")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
# call your function here
results = list_virtual_gateways(connection, module)
@@ -185,5 +186,5 @@ def main():
module.exit_json(virtual_gateways=results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py
index 77a994aaa..abc97f796 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ec2_vpc_vpn
version_added: 1.0.0
@@ -14,11 +12,6 @@ short_description: Create, modify, and delete EC2 VPN connections
description:
- This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters
option or specifying the VPN connection identifier.
-extends_documentation_fragment:
- - amazon.aws.ec2
- - amazon.aws.aws
- - amazon.aws.boto3
- - amazon.aws.tags
author:
- "Sloane Hertel (@s-hertel)"
options:
@@ -42,6 +35,7 @@ options:
vpn_gateway_id:
description:
- The ID of the virtual private gateway.
+ - Mutually exclusive with I(transit_gateway_id).
type: str
vpn_connection_id:
description:
@@ -53,6 +47,12 @@ options:
default: False
type: bool
required: false
+ transit_gateway_id:
+ description:
+ - The ID of the transit gateway.
+ - Mutually exclusive with I(vpn_gateway_id).
+ type: str
+ version_added: 6.2.0
tunnel_options:
description:
- An optional list object containing no more than two dict members, each of which may contain I(TunnelInsideCidr)
@@ -135,18 +135,28 @@ options:
required: false
type: int
default: 15
-'''
+extends_documentation_fragment:
+ - amazon.aws.region.modules
+ - amazon.aws.common.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
EXAMPLES = r"""
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
+# Note: These examples do not set authentication details, see the AWS Guide for details.
-- name: create a VPN connection
+- name: create a VPN connection with vpn_gateway_id
community.aws.ec2_vpc_vpn:
state: present
vpn_gateway_id: vgw-XXXXXXXX
customer_gateway_id: cgw-XXXXXXXX
+- name: Attach a vpn connection to transit gateway
+ community.aws.ec2_vpc_vpn:
+ state: present
+ transit_gateway_id: tgw-XXXXXXXX
+ customer_gateway_id: cgw-XXXXXXXX
+
- name: modify VPN connection tags
community.aws.ec2_vpc_vpn:
state: present
@@ -233,6 +243,12 @@ vpn_gateway_id:
returned: I(state=present)
sample:
vpn_gateway_id: vgw-cb0ae2a2
+transit_gateway_id:
+ description: The ID of the transit gateway to which the VPN connection can be attached.
+ type: str
+ returned: I(state=present)
+ sample:
+ transit_gateway_id: tgw-cb0ae2a2
options:
description: The VPN connection options (currently only containing static_routes_only).
type: complex
@@ -293,19 +309,23 @@ vpn_connection_id:
vpn_connection_id: vpn-781e0e19
"""
-from ansible.module_utils._text import to_text
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-
try:
- from botocore.exceptions import BotoCoreError, ClientError, WaiterError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import WaiterError
except ImportError:
pass # Handled by AnsibleAWSModule
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
class VPNConnectionException(Exception):
def __init__(self, msg, exception=None):
@@ -319,11 +339,14 @@ class VPNConnectionException(Exception):
class VPNRetry(AWSRetry):
@staticmethod
def status_code_from_exception(error):
- return (error.response['Error']['Code'], error.response['Error']['Message'],)
+ return (
+ error.response["Error"]["Code"],
+ error.response["Error"]["Message"],
+ )
@staticmethod
def found(response_code, catch_extra_error_codes=None):
- retry_on = ['The maximum number of mutating objects has been reached.']
+ retry_on = ["The maximum number of mutating objects has been reached."]
if catch_extra_error_codes:
retry_on.extend(catch_extra_error_codes)
@@ -338,14 +361,14 @@ class VPNRetry(AWSRetry):
def find_connection(connection, module_params, vpn_connection_id=None):
- ''' Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None,
- or raise an error if there were multiple viable connections. '''
+ """Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None,
+ or raise an error if there were multiple viable connections."""
- filters = module_params.get('filters')
+ filters = module_params.get("filters")
# vpn_connection_id may be provided via module option; takes precedence over any filter values
- if not vpn_connection_id and module_params.get('vpn_connection_id'):
- vpn_connection_id = module_params.get('vpn_connection_id')
+ if not vpn_connection_id and module_params.get("vpn_connection_id"):
+ vpn_connection_id = module_params.get("vpn_connection_id")
if not isinstance(vpn_connection_id, list) and vpn_connection_id:
vpn_connection_id = [to_text(vpn_connection_id)]
@@ -360,14 +383,13 @@ def find_connection(connection, module_params, vpn_connection_id=None):
# see if there is a unique matching connection
try:
if vpn_connection_id:
- existing_conn = connection.describe_vpn_connections(aws_retry=True,
- VpnConnectionIds=vpn_connection_id,
- Filters=formatted_filter)
+ existing_conn = connection.describe_vpn_connections(
+ aws_retry=True, VpnConnectionIds=vpn_connection_id, Filters=formatted_filter
+ )
else:
existing_conn = connection.describe_vpn_connections(aws_retry=True, Filters=formatted_filter)
except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed while describing VPN connection.",
- exception=e)
+ raise VPNConnectionException(msg="Failed while describing VPN connection.", exception=e)
return find_connection_response(connections=existing_conn)
@@ -375,48 +397,56 @@ def find_connection(connection, module_params, vpn_connection_id=None):
def add_routes(connection, vpn_connection_id, routes_to_add):
for route in routes_to_add:
try:
- connection.create_vpn_connection_route(aws_retry=True,
- VpnConnectionId=vpn_connection_id,
- DestinationCidrBlock=route)
+ connection.create_vpn_connection_route(
+ aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route
+ )
except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id),
- exception=e)
+ raise VPNConnectionException(
+ msg=f"Failed while adding route {route} to the VPN connection {vpn_connection_id}.",
+ exception=e,
+ )
def remove_routes(connection, vpn_connection_id, routes_to_remove):
for route in routes_to_remove:
try:
- connection.delete_vpn_connection_route(aws_retry=True,
- VpnConnectionId=vpn_connection_id,
- DestinationCidrBlock=route)
+ connection.delete_vpn_connection_route(
+ aws_retry=True, VpnConnectionId=vpn_connection_id, DestinationCidrBlock=route
+ )
except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id),
- exception=e)
+ raise VPNConnectionException(
+ msg=f"Failed to remove route {route} from the VPN connection {vpn_connection_id}.",
+ exception=e,
+ )
def create_filter(module_params, provided_filters):
- """ Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task """
- boto3ify_filter = {'cgw-config': 'customer-gateway-configuration',
- 'static-routes-only': 'option.static-routes-only',
- 'cidr': 'route.destination-cidr-block',
- 'bgp': 'bgp-asn',
- 'vpn': 'vpn-connection-id',
- 'vgw': 'vpn-gateway-id',
- 'tag-keys': 'tag-key',
- 'tag-values': 'tag-value',
- 'tags': 'tag',
- 'cgw': 'customer-gateway-id'}
+ """Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task"""
+ boto3ify_filter = {
+ "cgw-config": "customer-gateway-configuration",
+ "static-routes-only": "option.static-routes-only",
+ "cidr": "route.destination-cidr-block",
+ "bgp": "bgp-asn",
+ "vpn": "vpn-connection-id",
+ "vgw": "vpn-gateway-id",
+ "tag-keys": "tag-key",
+ "tag-values": "tag-value",
+ "tags": "tag",
+ "cgw": "customer-gateway-id",
+ }
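+ # Maps the module's short filter aliases to the filter names the EC2 API expects.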
# unmodifiable options and their filter name counterpart
- param_to_filter = {"customer_gateway_id": "customer-gateway-id",
- "vpn_gateway_id": "vpn-gateway-id",
- "vpn_connection_id": "vpn-connection-id"}
+ param_to_filter = {
+ "customer_gateway_id": "customer-gateway-id",
+ "vpn_gateway_id": "vpn-gateway-id",
+ "transit_gateway_id": "transit-gateway-id",
+ "vpn_connection_id": "vpn-connection-id",
+ }
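+ # transit-gateway-id joins the unmodifiable options so filter-based matching also covers TGW-attached connections.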
flat_filter_dict = {}
formatted_filter = []
for raw_param in dict(provided_filters):
-
# fix filter names to be recognized by boto3
if raw_param in boto3ify_filter:
param = boto3ify_filter[raw_param]
@@ -424,17 +454,17 @@ def create_filter(module_params, provided_filters):
elif raw_param in boto3ify_filter.values():
param = raw_param
else:
- raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param))
+ raise VPNConnectionException(msg=f"{raw_param} is not a valid filter.")
# reformat filters with special formats
- if param == 'tag':
+ if param == "tag":
for key in provided_filters[param]:
- formatted_key = 'tag:' + key
+ formatted_key = "tag:" + key
if isinstance(provided_filters[param][key], list):
flat_filter_dict[formatted_key] = str(provided_filters[param][key])
else:
flat_filter_dict[formatted_key] = [str(provided_filters[param][key])]
- elif param == 'option.static-routes-only':
+ elif param == "option.static-routes-only":
flat_filter_dict[param] = [str(provided_filters[param]).lower()]
else:
if isinstance(provided_filters[param], list):
@@ -448,25 +478,25 @@ def create_filter(module_params, provided_filters):
flat_filter_dict[param_to_filter[param]] = [module_params.get(param)]
# change the flat dict into something boto3 will understand
- formatted_filter = [{'Name': key, 'Values': value} for key, value in flat_filter_dict.items()]
+ formatted_filter = [{"Name": key, "Values": value} for key, value in flat_filter_dict.items()]
return formatted_filter
def find_connection_response(connections=None):
- """ Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found,
- returns None if the connection does not exist, raise an error if multiple matches are found. """
+ """Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found,
+ returns None if the connection does not exist, and raises an error if multiple matches are found."""
# Found no connections
- if not connections or 'VpnConnections' not in connections:
+ if not connections or "VpnConnections" not in connections:
return None
# Too many results
- elif connections and len(connections['VpnConnections']) > 1:
+ elif connections and len(connections["VpnConnections"]) > 1:
viable = []
- for each in connections['VpnConnections']:
+ for each in connections["VpnConnections"]:
# deleted connections are not modifiable
- if each['State'] not in ("deleted", "deleting"):
+ if each["State"] not in ("deleted", "deleting"):
viable.append(each)
if len(viable) == 1:
# Found one viable result; return unique match
@@ -475,20 +505,34 @@ def find_connection_response(connections=None):
# Found a result but it was deleted already; since there was only one viable result create a new one
return None
else:
- raise VPNConnectionException(msg="More than one matching VPN connection was found. "
- "To modify or delete a VPN please specify vpn_connection_id or add filters.")
+ raise VPNConnectionException(
+ msg=(
+ "More than one matching VPN connection was found. "
+ "To modify or delete a VPN please specify vpn_connection_id or add filters."
+ )
+ )
# Found unique match
- elif connections and len(connections['VpnConnections']) == 1:
+ elif connections and len(connections["VpnConnections"]) == 1:
# deleted connections are not modifiable
- if connections['VpnConnections'][0]['State'] not in ("deleted", "deleting"):
- return connections['VpnConnections'][0]
-
-
-def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type, max_attempts, delay, tunnel_options=None):
- """ Creates a VPN connection """
-
- options = {'StaticRoutesOnly': static_only}
+ if connections["VpnConnections"][0]["State"] not in ("deleted", "deleting"):
+ return connections["VpnConnections"][0]
+
+
+def create_connection(
+ connection,
+ customer_gateway_id,
+ static_only,
+ vpn_gateway_id,
+ transit_gateway_id,
+ connection_type,
+ max_attempts,
+ delay,
+ tunnel_options=None,
+):
+ """Creates a VPN connection"""
+
+ options = {"StaticRoutesOnly": static_only}
if tunnel_options and len(tunnel_options) <= 2:
t_opt = []
for m in tunnel_options:
@@ -498,108 +542,106 @@ def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_
raise TypeError("non-dict list member")
t_opt.append(m)
if t_opt:
- options['TunnelOptions'] = t_opt
+ options["TunnelOptions"] = t_opt
+
+ if not (customer_gateway_id and (vpn_gateway_id or transit_gateway_id)):
+ raise VPNConnectionException(
+ msg=(
+ "No matching connection was found. To create a new connection you must provide "
+ "customer_gateway_id and one of either transit_gateway_id or vpn_gateway_id."
+ )
+ )
+ vpn_connection_params = {"Type": connection_type, "CustomerGatewayId": customer_gateway_id, "Options": options}
+ if vpn_gateway_id:
+ vpn_connection_params["VpnGatewayId"] = vpn_gateway_id
+ if transit_gateway_id:
+ vpn_connection_params["TransitGatewayId"] = transit_gateway_id
- if not (customer_gateway_id and vpn_gateway_id):
- raise VPNConnectionException(msg="No matching connection was found. To create a new connection you must provide "
- "both vpn_gateway_id and customer_gateway_id.")
try:
- vpn = connection.create_vpn_connection(Type=connection_type,
- CustomerGatewayId=customer_gateway_id,
- VpnGatewayId=vpn_gateway_id,
- Options=options)
- connection.get_waiter('vpn_connection_available').wait(
- VpnConnectionIds=[vpn['VpnConnection']['VpnConnectionId']],
- WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
+ vpn = connection.create_vpn_connection(**vpn_connection_params)
+ connection.get_waiter("vpn_connection_available").wait(
+ VpnConnectionIds=[vpn["VpnConnection"]["VpnConnectionId"]],
+ WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts},
)
except WaiterError as e:
- raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be available".format(vpn['VpnConnection']['VpnConnectionId']),
- exception=e)
+ raise VPNConnectionException(
+ msg=f"Failed to wait for VPN connection {vpn['VpnConnection']['VpnConnectionId']} to be available",
+ exception=e,
+ )
except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to create VPN connection",
- exception=e)
+ raise VPNConnectionException(msg="Failed to create VPN connection", exception=e)
- return vpn['VpnConnection']
+ return vpn["VpnConnection"]
def delete_connection(connection, vpn_connection_id, delay, max_attempts):
- """ Deletes a VPN connection """
+ """Deletes a VPN connection"""
try:
connection.delete_vpn_connection(aws_retry=True, VpnConnectionId=vpn_connection_id)
- connection.get_waiter('vpn_connection_deleted').wait(
- VpnConnectionIds=[vpn_connection_id],
- WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
+ connection.get_waiter("vpn_connection_deleted").wait(
+ VpnConnectionIds=[vpn_connection_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}
)
except WaiterError as e:
- raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id),
- exception=e)
+ raise VPNConnectionException(
+ msg=f"Failed to wait for VPN connection {vpn_connection_id} to be removed", exception=e
+ )
except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id),
- exception=e)
+ raise VPNConnectionException(msg=f"Failed to delete the VPN connection: {vpn_connection_id}", exception=e)
def add_tags(connection, vpn_connection_id, add):
try:
- connection.create_tags(aws_retry=True,
- Resources=[vpn_connection_id],
- Tags=add)
+ connection.create_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=add)
except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add),
- exception=e)
+ raise VPNConnectionException(msg=f"Failed to add the tags: {add}.", exception=e)
def remove_tags(connection, vpn_connection_id, remove):
# format tags since they are a list in the format ['tag1', 'tag2', 'tag3']
- key_dict_list = [{'Key': tag} for tag in remove]
+ key_dict_list = [{"Key": tag} for tag in remove]
try:
- connection.delete_tags(aws_retry=True,
- Resources=[vpn_connection_id],
- Tags=key_dict_list)
+ connection.delete_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=key_dict_list)
except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove),
- exception=e)
+ raise VPNConnectionException(msg=f"Failed to remove the tags: {remove}.", exception=e)
def check_for_update(connection, module_params, vpn_connection_id):
- """ Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change. """
- tags = module_params.get('tags')
- routes = module_params.get('routes')
- purge_tags = module_params.get('purge_tags')
- purge_routes = module_params.get('purge_routes')
+ """Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change."""
+ tags = module_params.get("tags")
+ routes = module_params.get("routes")
+ purge_tags = module_params.get("purge_tags")
+ purge_routes = module_params.get("purge_routes")
vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id)
current_attrs = camel_dict_to_snake_dict(vpn_connection)
# Initialize changes dict
- changes = {'tags_to_add': [],
- 'tags_to_remove': [],
- 'routes_to_add': [],
- 'routes_to_remove': []}
+ changes = {"tags_to_add": [], "tags_to_remove": [], "routes_to_add": [], "routes_to_remove": []}
# Get changes to tags
- current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value')
+ current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get("tags", []), "key", "value")
if tags is None:
- changes['tags_to_remove'] = []
- changes['tags_to_add'] = []
+ changes["tags_to_remove"] = []
+ changes["tags_to_add"] = []
else:
- tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags)
- changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add)
+ tags_to_add, changes["tags_to_remove"] = compare_aws_tags(current_tags, tags, purge_tags)
+ changes["tags_to_add"] = ansible_dict_to_boto3_tag_list(tags_to_add)
# Get changes to routes
- if 'Routes' in vpn_connection:
- current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']]
+ if "Routes" in vpn_connection:
+ current_routes = [route["DestinationCidrBlock"] for route in vpn_connection["Routes"]]
if purge_routes:
- changes['routes_to_remove'] = [old_route for old_route in current_routes if old_route not in routes]
- changes['routes_to_add'] = [new_route for new_route in routes if new_route not in current_routes]
+ changes["routes_to_remove"] = [old_route for old_route in current_routes if old_route not in routes]
+ changes["routes_to_add"] = [new_route for new_route in routes if new_route not in current_routes]
# Check if nonmodifiable attributes are attempted to be modified
for attribute in current_attrs:
if attribute in ("tags", "routes", "state"):
continue
- elif attribute == 'options':
- will_be = module_params.get('static_only', None)
- is_now = bool(current_attrs[attribute]['static_routes_only'])
- attribute = 'static_only'
- elif attribute == 'type':
+ elif attribute == "options":
+ will_be = module_params.get("static_only", None)
+ is_now = bool(current_attrs[attribute]["static_routes_only"])
+ attribute = "static_only"
+ elif attribute == "type":
will_be = module_params.get("connection_type", None)
is_now = current_attrs[attribute]
else:
@@ -607,110 +649,118 @@ def check_for_update(connection, module_params, vpn_connection_id):
will_be = module_params.get(attribute, None)
if will_be is not None and to_text(will_be) != to_text(is_now):
- raise VPNConnectionException(msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN "
- "connection attributes are tags and routes. The value you tried to change it to "
- "is {2}.".format(attribute, is_now, will_be))
+ raise VPNConnectionException(
+ msg=(
+ f"You cannot modify {attribute}, the current value of which is {is_now}. Modifiable VPN connection"
+ f" attributes are tags and routes. The value you tried to change it to is {will_be}."
+ )
+ )
return changes
def make_changes(connection, vpn_connection_id, changes):
- """ changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove',
- the values of which are lists (generated by check_for_update()).
+ """changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove',
+ the values of which are lists (generated by check_for_update()).
"""
changed = False
- if changes['tags_to_add']:
+ if changes["tags_to_add"]:
changed = True
- add_tags(connection, vpn_connection_id, changes['tags_to_add'])
+ add_tags(connection, vpn_connection_id, changes["tags_to_add"])
- if changes['tags_to_remove']:
+ if changes["tags_to_remove"]:
changed = True
- remove_tags(connection, vpn_connection_id, changes['tags_to_remove'])
+ remove_tags(connection, vpn_connection_id, changes["tags_to_remove"])
- if changes['routes_to_add']:
+ if changes["routes_to_add"]:
changed = True
- add_routes(connection, vpn_connection_id, changes['routes_to_add'])
+ add_routes(connection, vpn_connection_id, changes["routes_to_add"])
- if changes['routes_to_remove']:
+ if changes["routes_to_remove"]:
changed = True
- remove_routes(connection, vpn_connection_id, changes['routes_to_remove'])
+ remove_routes(connection, vpn_connection_id, changes["routes_to_remove"])
return changed
def get_check_mode_results(connection, module_params, vpn_connection_id=None, current_state=None):
- """ Returns the changes that would be made to a VPN Connection """
- state = module_params.get('state')
- if state == 'absent':
+ """Returns the changes that would be made to a VPN Connection"""
+ state = module_params.get("state")
+ if state == "absent":
if vpn_connection_id:
return True, {}
else:
return False, {}
changed = False
- results = {'customer_gateway_configuration': '',
- 'customer_gateway_id': module_params.get('customer_gateway_id'),
- 'vpn_gateway_id': module_params.get('vpn_gateway_id'),
- 'options': {'static_routes_only': module_params.get('static_only')},
- 'routes': [module_params.get('routes')]}
+ results = {
+ "customer_gateway_configuration": "",
+ "customer_gateway_id": module_params.get("customer_gateway_id"),
+ "vpn_gateway_id": module_params.get("vpn_gateway_id"),
+ "transit_gateway_id": module_params.get("transit_gateway_id"),
+ "options": {"static_routes_only": module_params.get("static_only")},
+ "routes": [module_params.get("routes")],
+ }
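+ # Shaped like a snake_cased describe_vpn_connections entry so check mode output resembles a real run.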
# get combined current tags and tags to set
- present_tags = module_params.get('tags')
+ present_tags = module_params.get("tags")
if present_tags is None:
pass
- elif current_state and 'Tags' in current_state:
- current_tags = boto3_tag_list_to_ansible_dict(current_state['Tags'])
- tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get('purge_tags'))
+ elif current_state and "Tags" in current_state:
+ current_tags = boto3_tag_list_to_ansible_dict(current_state["Tags"])
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, present_tags, module_params.get("purge_tags"))
changed |= bool(tags_to_remove) or bool(tags_to_add)
- if module_params.get('purge_tags'):
+ if module_params.get("purge_tags"):
current_tags = {}
current_tags.update(present_tags)
- results['tags'] = current_tags
- elif module_params.get('tags'):
+ results["tags"] = current_tags
+ elif module_params.get("tags"):
changed = True
if present_tags:
- results['tags'] = present_tags
+ results["tags"] = present_tags
# get combined current routes and routes to add
- present_routes = module_params.get('routes')
- if current_state and 'Routes' in current_state:
- current_routes = [route['DestinationCidrBlock'] for route in current_state['Routes']]
- if module_params.get('purge_routes'):
+ present_routes = module_params.get("routes")
+ if current_state and "Routes" in current_state:
+ current_routes = [route["DestinationCidrBlock"] for route in current_state["Routes"]]
+ if module_params.get("purge_routes"):
if set(current_routes) != set(present_routes):
changed = True
elif set(present_routes) != set(current_routes):
if not set(present_routes) < set(current_routes):
changed = True
present_routes.extend([route for route in current_routes if route not in present_routes])
- elif module_params.get('routes'):
+ elif module_params.get("routes"):
changed = True
- results['routes'] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes]
+ results["routes"] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes]
# return the vpn_connection_id if it's known
if vpn_connection_id:
- results['vpn_connection_id'] = vpn_connection_id
+ results["vpn_connection_id"] = vpn_connection_id
else:
changed = True
- results['vpn_connection_id'] = 'vpn-XXXXXXXX'
+ results["vpn_connection_id"] = "vpn-XXXXXXXX"
return changed, results
def ensure_present(connection, module_params, check_mode=False):
- """ Creates and adds tags to a VPN connection. If the connection already exists update tags. """
+ """Creates and adds tags to a VPN connection. If the connection already exists update tags."""
vpn_connection = find_connection(connection, module_params)
changed = False
- delay = module_params.get('delay')
- max_attempts = module_params.get('wait_timeout') // delay
+ delay = module_params.get("delay")
+ max_attempts = module_params.get("wait_timeout") // delay
# No match but vpn_connection_id was specified.
- if not vpn_connection and module_params.get('vpn_connection_id'):
- raise VPNConnectionException(msg="There is no VPN connection available or pending with that id. Did you delete it?")
+ if not vpn_connection and module_params.get("vpn_connection_id"):
+ raise VPNConnectionException(
+ msg="There is no VPN connection available or pending with that id. Did you delete it?"
+ )
# Unique match was found. Check if attributes provided differ.
elif vpn_connection:
- vpn_connection_id = vpn_connection['VpnConnectionId']
+ vpn_connection_id = vpn_connection["VpnConnectionId"]
# check_for_update returns a dict with the keys tags_to_add, tags_to_remove, routes_to_add, routes_to_remove
changes = check_for_update(connection, module_params, vpn_connection_id)
if check_mode:
@@ -722,38 +772,43 @@ def ensure_present(connection, module_params, check_mode=False):
changed = True
if check_mode:
return get_check_mode_results(connection, module_params)
- vpn_connection = create_connection(connection,
- customer_gateway_id=module_params.get('customer_gateway_id'),
- static_only=module_params.get('static_only'),
- vpn_gateway_id=module_params.get('vpn_gateway_id'),
- connection_type=module_params.get('connection_type'),
- tunnel_options=module_params.get('tunnel_options'),
- max_attempts=max_attempts,
- delay=delay)
- changes = check_for_update(connection, module_params, vpn_connection['VpnConnectionId'])
- make_changes(connection, vpn_connection['VpnConnectionId'], changes)
+ vpn_connection = create_connection(
+ connection,
+ customer_gateway_id=module_params.get("customer_gateway_id"),
+ static_only=module_params.get("static_only"),
+ vpn_gateway_id=module_params.get("vpn_gateway_id"),
+ transit_gateway_id=module_params.get("transit_gateway_id"),
+ connection_type=module_params.get("connection_type"),
+ tunnel_options=module_params.get("tunnel_options"),
+ max_attempts=max_attempts,
+ delay=delay,
+ )
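+ # A freshly created connection may still need routes and tags applied, so run an immediate update pass.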
+ changes = check_for_update(connection, module_params, vpn_connection["VpnConnectionId"])
+ make_changes(connection, vpn_connection["VpnConnectionId"], changes)
# get latest version if a change has been made and make tags output nice before returning it
if vpn_connection:
- vpn_connection = find_connection(connection, module_params, vpn_connection['VpnConnectionId'])
- if 'Tags' in vpn_connection:
- vpn_connection['Tags'] = boto3_tag_list_to_ansible_dict(vpn_connection['Tags'])
+ vpn_connection = find_connection(connection, module_params, vpn_connection["VpnConnectionId"])
+ if "Tags" in vpn_connection:
+ vpn_connection["Tags"] = boto3_tag_list_to_ansible_dict(vpn_connection["Tags"])
return changed, vpn_connection
def ensure_absent(connection, module_params, check_mode=False):
- """ Deletes a VPN connection if it exists. """
+ """Deletes a VPN connection if it exists."""
vpn_connection = find_connection(connection, module_params)
if check_mode:
- return get_check_mode_results(connection, module_params, vpn_connection['VpnConnectionId'] if vpn_connection else None)
+ return get_check_mode_results(
+ connection, module_params, vpn_connection["VpnConnectionId"] if vpn_connection else None
+ )
- delay = module_params.get('delay')
- max_attempts = module_params.get('wait_timeout') // delay
+ delay = module_params.get("delay")
+ max_attempts = module_params.get("wait_timeout") // delay
if vpn_connection:
- delete_connection(connection, vpn_connection['VpnConnectionId'], delay=delay, max_attempts=max_attempts)
+ delete_connection(connection, vpn_connection["VpnConnectionId"], delay=delay, max_attempts=max_attempts)
changed = True
else:
changed = False
@@ -763,32 +818,40 @@ def ensure_absent(connection, module_params, check_mode=False):
def main():
argument_spec = dict(
- state=dict(type='str', default='present', choices=['present', 'absent']),
- filters=dict(type='dict', default={}),
- vpn_gateway_id=dict(type='str'),
- tags=dict(type='dict', aliases=['resource_tags']),
- connection_type=dict(default='ipsec.1', type='str'),
- tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'),
- static_only=dict(default=False, type='bool'),
- customer_gateway_id=dict(type='str'),
- vpn_connection_id=dict(type='str'),
- purge_tags=dict(type='bool', default=True),
- routes=dict(type='list', default=[], elements='str'),
- purge_routes=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=600),
- delay=dict(type='int', default=15),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ filters=dict(type="dict", default={}),
+ vpn_gateway_id=dict(type="str"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ connection_type=dict(default="ipsec.1", type="str"),
+ transit_gateway_id=dict(type="str"),
+ tunnel_options=dict(no_log=True, type="list", default=[], elements="dict"),
+ static_only=dict(default=False, type="bool"),
+ customer_gateway_id=dict(type="str"),
+ vpn_connection_id=dict(type="str"),
+ purge_tags=dict(type="bool", default=True),
+ routes=dict(type="list", default=[], elements="str"),
+ purge_routes=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=600),
+ delay=dict(type="int", default=15),
+ )
+ mutually_exclusive = [
+ ["vpn_gateway_id", "transit_gateway_id"],
+ ]
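+ # Rejects tasks that set both gateway types before any AWS call is made.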
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True)
- connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10))
+ connection = module.client("ec2", retry_decorator=VPNRetry.jittered_backoff(retries=10))
- state = module.params.get('state')
+ state = module.params.get("state")
parameters = dict(module.params)
try:
- if state == 'present':
+ if state == "present":
changed, response = ensure_present(connection, parameters, module.check_mode)
- elif state == 'absent':
+ elif state == "absent":
changed, response = ensure_absent(connection, parameters, module.check_mode)
except VPNConnectionException as e:
if e.exception:
@@ -799,5 +862,5 @@ def main():
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py
index c7a71f154..d304e4568 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_vpc_vpn_info.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ec2_vpc_vpn_info
version_added: 1.0.0
short_description: Gather information about VPN Connections in AWS.
description:
- - Gather information about VPN Connections in AWS.
-author: Madhura Naniwadekar (@Madhura-CSI)
+ - Gather information about VPN Connections in AWS.
+author:
+ - Madhura Naniwadekar (@Madhura-CSI)
options:
filters:
description:
@@ -30,13 +29,12 @@ options:
elements: str
default: []
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all vpn connections
community.aws.ec2_vpc_vpn_info:
@@ -52,9 +50,9 @@ EXAMPLES = r'''
filters:
vpn-gateway-id: vgw-cbe66beb
register: vpn_conn_info
-'''
+"""
-RETURN = r'''
+RETURN = r"""
vpn_connections:
description: List of one or more VPN Connections.
returned: always
@@ -158,30 +156,33 @@ vpn_connections:
returned: always
type: str
sample: vgw-cbe56bfb
-'''
+"""
import json
+
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict,
- )
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def date_handler(obj):
- return obj.isoformat() if hasattr(obj, 'isoformat') else obj
+ return obj.isoformat() if hasattr(obj, "isoformat") else obj
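+ # Used as json.dumps(default=...) so datetime fields in the response become ISO 8601 strings.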
def list_vpn_connections(connection, module):
params = dict()
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- params['VpnConnectionIds'] = module.params.get('vpn_connection_ids')
+ params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+ params["VpnConnectionIds"] = module.params.get("vpn_connection_ids")
try:
result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler))
@@ -189,28 +190,29 @@ def list_vpn_connections(connection, module):
module.fail_json_aws(e, msg="Cannot validate JSON data")
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not describe customer gateways")
- snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']]
+ snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result["VpnConnections"]]
if snaked_vpn_connections:
for vpn_connection in snaked_vpn_connections:
- vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', []))
+ vpn_connection["tags"] = boto3_tag_list_to_ansible_dict(vpn_connection.get("tags", []))
module.exit_json(changed=False, vpn_connections=snaked_vpn_connections)
def main():
-
argument_spec = dict(
- vpn_connection_ids=dict(default=[], type='list', elements='str'),
- filters=dict(default={}, type='dict')
+ vpn_connection_ids=dict(default=[], type="list", elements="str"),
+ filters=dict(default={}, type="dict"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- mutually_exclusive=[['vpn_connection_ids', 'filters']],
- supports_check_mode=True)
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[["vpn_connection_ids", "filters"]],
+ supports_check_mode=True,
+ )
- connection = module.client('ec2')
+ connection = module.client("ec2")
list_vpn_connections(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ec2_win_password.py b/ansible_collections/community/aws/plugins/modules/ec2_win_password.py
index 9b92c3e4f..a9ca8e94c 100644
--- a/ansible_collections/community/aws/plugins/modules/ec2_win_password.py
+++ b/ansible_collections/community/aws/plugins/modules/ec2_win_password.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ec2_win_password
version_added: 1.0.0
short_description: Gets the default administrator password for EC2 Windows instances
description:
- - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).
-author: "Rick Mendes (@rickmendes)"
+ - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).
+author:
+ - "Rick Mendes (@rickmendes)"
options:
instance_id:
description:
@@ -48,16 +47,18 @@ options:
default: 120
type: int
+requirements:
+ - cryptography
+
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-requirements:
-- cryptography
-'''
+RETURN = r""" # """
-EXAMPLES = '''
+EXAMPLES = r"""
# Example of getting a password
- name: get the Administrator password
community.aws.ec2_win_password:
@@ -92,7 +93,7 @@ EXAMPLES = '''
key_file: "~/aws-creds/my_test_key.pem"
wait: true
wait_timeout: 45
-'''
+"""
import datetime
import time
@@ -102,6 +103,7 @@ try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
HAS_CRYPTOGRAPHY = True
except ImportError:
HAS_CRYPTOGRAPHY = False
@@ -113,47 +115,48 @@ except ImportError:
from ansible.module_utils._text import to_bytes
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def setup_module_object():
argument_spec = dict(
instance_id=dict(required=True),
- key_file=dict(required=False, default=None, type='path'),
+ key_file=dict(required=False, default=None, type="path"),
key_passphrase=dict(no_log=True, default=None, required=False),
key_data=dict(no_log=True, default=None, required=False),
- wait=dict(type='bool', default=False, required=False),
- wait_timeout=dict(default=120, required=False, type='int'),
+ wait=dict(type="bool", default=False, required=False),
+ wait_timeout=dict(default=120, required=False, type="int"),
)
- mutually_exclusive = [['key_file', 'key_data']]
+ mutually_exclusive = [["key_file", "key_data"]]
module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive)
return module
def _get_password(module, client, instance_id):
try:
- data = client.get_password_data(aws_retry=True, InstanceId=instance_id)['PasswordData']
+ data = client.get_password_data(aws_retry=True, InstanceId=instance_id)["PasswordData"]
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg='Failed to get password data')
+ module.fail_json_aws(e, msg="Failed to get password data")
return data
def ec2_win_password(module):
- instance_id = module.params.get('instance_id')
- key_file = module.params.get('key_file')
- if module.params.get('key_passphrase') is None:
+ instance_id = module.params.get("instance_id")
+ key_file = module.params.get("key_file")
+ if module.params.get("key_passphrase") is None:
b_key_passphrase = None
else:
- b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict')
- if module.params.get('key_data') is None:
+ b_key_passphrase = to_bytes(module.params.get("key_passphrase"), errors="surrogate_or_strict")
+ if module.params.get("key_data") is None:
b_key_data = None
else:
- b_key_data = to_bytes(module.params.get('key_data'), errors='surrogate_or_strict')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
+ b_key_data = to_bytes(module.params.get("key_data"), errors="surrogate_or_strict")
+ wait = module.params.get("wait")
+ wait_timeout = module.params.get("wait_timeout")
- client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
if wait:
start = datetime.datetime.now()
@@ -171,15 +174,15 @@ def ec2_win_password(module):
decoded = b64decode(data)
if wait and datetime.datetime.now() >= end:
- module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)
+ module.fail_json(msg=f"wait for password timeout after {int(wait_timeout)} seconds")
if key_file is not None and b_key_data is None:
try:
- with open(key_file, 'rb') as f:
+ with open(key_file, "rb") as f:
key = load_pem_private_key(f.read(), b_key_passphrase, default_backend())
except IOError as e:
# Handle bad files
- module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
+ module.fail_json(msg=f"I/O error ({int(e.errno)}) opening key file: {e.strerror}")
except (ValueError, TypeError) as e:
# Handle issues loading key
module.fail_json(msg="unable to parse key file")
@@ -195,7 +198,7 @@ def ec2_win_password(module):
decrypted = None
if decrypted is None:
- module.fail_json(msg="unable to decrypt password", win_password='', changed=False)
+ module.fail_json(msg="unable to decrypt password", win_password="", changed=False)
else:
if wait:
elapsed = datetime.datetime.now() - start
@@ -208,10 +211,10 @@ def main():
module = setup_module_object()
if not HAS_CRYPTOGRAPHY:
- module.fail_json(msg='cryptography package required for this module.')
+ module.fail_json(msg="cryptography package required for this module.")
ec2_win_password(module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_attribute.py b/ansible_collections/community/aws/plugins/modules/ecs_attribute.py
index 6efe701d1..682014675 100644
--- a/ansible_collections/community/aws/plugins/modules/ecs_attribute.py
+++ b/ansible_collections/community/aws/plugins/modules/ecs_attribute.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ecs_attribute
version_added: 1.0.0
short_description: manage ecs attributes
description:
- - Create, update or delete ECS container instance attributes.
-author: Andrej Svenke (@anryko)
+ - Create, update or delete ECS container instance attributes.
+author:
+ - Andrej Svenke (@anryko)
options:
cluster:
description:
@@ -54,13 +53,12 @@ options:
required: true
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Set attributes
@@ -82,9 +80,9 @@ EXAMPLES = r'''
- flavor: test
- migrated
delegate_to: localhost
-'''
+"""
-RETURN = r'''
+RETURN = r"""
attributes:
description: attributes
type: complex
@@ -108,15 +106,16 @@ attributes:
description: value of the attribute
returned: if present
type: str
-'''
+"""
try:
import botocore
- from botocore.exceptions import ClientError, EndpointConnectionError
+ from botocore.exceptions import ClientError
+ from botocore.exceptions import EndpointConnectionError
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class EcsAttributes(object):
@@ -136,29 +135,27 @@ class EcsAttributes(object):
@staticmethod
def _validate_attrs(attrs):
- return all(tuple(attr.keys()) in (('name', 'value'), ('value', 'name')) for attr in attrs)
+ return all(tuple(attr.keys()) in (("name", "value"), ("value", "name")) for attr in attrs)
def _parse_attrs(self, attrs):
attrs_parsed = []
for attr in attrs:
if isinstance(attr, dict):
if len(attr) != 1:
- self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr))
+ self.module.fail_json(msg=f"Incorrect attribute format - {str(attr)}")
name, value = list(attr.items())[0]
- attrs_parsed.append({'name': name, 'value': value})
+ attrs_parsed.append({"name": name, "value": value})
elif isinstance(attr, str):
- attrs_parsed.append({'name': attr, 'value': None})
+ attrs_parsed.append({"name": attr, "value": None})
else:
- self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs))
+ self.module.fail_json(msg=f"Incorrect attributes format - {str(attrs)}")
return attrs_parsed
def _setup_attr_obj(self, ecs_arn, name, value=None, skip_value=False):
- attr_obj = {'targetType': 'container-instance',
- 'targetId': ecs_arn,
- 'name': name}
+ attr_obj = {"targetType": "container-instance", "targetId": ecs_arn, "name": name}
if not skip_value and value is not None:
- attr_obj['value'] = value
+ attr_obj["value"] = value
return attr_obj
@@ -187,41 +184,43 @@ class Ec2EcsInstance(object):
self.ec2_id = ec2_id
try:
- self.ecs = module.client('ecs')
+ self.ecs = module.client("ecs")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
self.ecs_arn = self._get_ecs_arn()
def _get_ecs_arn(self):
try:
- ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)['containerInstanceArns']
- ec2_instances = self.ecs.describe_container_instances(cluster=self.cluster,
- containerInstances=ecs_instances_arns)['containerInstances']
+ ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)["containerInstanceArns"]
+ ec2_instances = self.ecs.describe_container_instances(
+ cluster=self.cluster, containerInstances=ecs_instances_arns
+ )["containerInstances"]
except (ClientError, EndpointConnectionError) as e:
- self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e))
+ self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}")
try:
- ecs_arn = next(inst for inst in ec2_instances
- if inst['ec2InstanceId'] == self.ec2_id)['containerInstanceArn']
+ ecs_arn = next(inst for inst in ec2_instances if inst["ec2InstanceId"] == self.ec2_id)[
+ "containerInstanceArn"
+ ]
except StopIteration:
- self.module.fail_json(msg="EC2 instance Id not found in ECS cluster - %s" % str(self.cluster))
+ self.module.fail_json(msg=f"EC2 instance Id not found in ECS cluster - {str(self.cluster)}")
return ecs_arn
def attrs_put(self, attrs):
"""Puts attributes on ECS container instance"""
try:
- self.ecs.put_attributes(cluster=self.cluster,
- attributes=attrs.get_for_ecs_arn(self.ecs_arn))
+ self.ecs.put_attributes(cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn))
except ClientError as e:
self.module.fail_json(msg=str(e))
def attrs_delete(self, attrs):
"""Deletes attributes from ECS container instance."""
try:
- self.ecs.delete_attributes(cluster=self.cluster,
- attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True))
+ self.ecs.delete_attributes(
+ cluster=self.cluster, attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True)
+ )
except ClientError as e:
self.module.fail_json(msg=str(e))
@@ -230,33 +229,33 @@ class Ec2EcsInstance(object):
Returns EcsAttributes object containing attributes from ECS container instance with names
matching to attrs.attributes (EcsAttributes Object).
"""
- attr_objs = [{'targetType': 'container-instance', 'attributeName': attr['name']}
- for attr in attrs]
+ attr_objs = [{"targetType": "container-instance", "attributeName": attr["name"]} for attr in attrs]
try:
- matched_ecs_targets = [attr_found for attr_obj in attr_objs
- for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)['attributes']]
+ matched_ecs_targets = [
+ attr_found
+ for attr_obj in attr_objs
+ for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)["attributes"]
+ ]
except ClientError as e:
- self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e))
+ self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}")
- matched_objs = [target for target in matched_ecs_targets
- if target['targetId'] == self.ecs_arn]
+ matched_objs = [target for target in matched_ecs_targets if target["targetId"] == self.ecs_arn]
- results = [{'name': match['name'], 'value': match.get('value', None)}
- for match in matched_objs]
+ results = [{"name": match["name"], "value": match.get("value", None)} for match in matched_objs]
return EcsAttributes(self.module, results)
def main():
argument_spec = dict(
- state=dict(required=False, default='present', choices=['present', 'absent']),
- cluster=dict(required=True, type='str'),
- ec2_instance_id=dict(required=True, type='str'),
- attributes=dict(required=True, type='list', elements='dict'),
+ state=dict(required=False, default="present", choices=["present", "absent"]),
+ cluster=dict(required=True, type="str"),
+ ec2_instance_id=dict(required=True, type="str"),
+ attributes=dict(required=True, type="list", elements="dict"),
)
- required_together = [['cluster', 'ec2_instance_id', 'attributes']]
+ required_together = [["cluster", "ec2_instance_id", "attributes"]]
module = AnsibleAWSModule(
argument_spec=argument_spec,
@@ -264,39 +263,43 @@ def main():
required_together=required_together,
)
- cluster = module.params['cluster']
- ec2_instance_id = module.params['ec2_instance_id']
- attributes = module.params['attributes']
+ cluster = module.params["cluster"]
+ ec2_instance_id = module.params["ec2_instance_id"]
+ attributes = module.params["attributes"]
conti = Ec2EcsInstance(module, cluster, ec2_instance_id)
attrs = EcsAttributes(module, attributes)
- results = {'changed': False,
- 'attributes': [
- {'cluster': cluster,
- 'ec2_instance_id': ec2_instance_id,
- 'attributes': attributes}
- ]}
+ results = {
+ "changed": False,
+ "attributes": [
+ {
+ "cluster": cluster,
+ "ec2_instance_id": ec2_instance_id,
+ "attributes": attributes,
+ }
+ ],
+ }
attrs_present = conti.attrs_get_by_name(attrs)
- if module.params['state'] == 'present':
+ if module.params["state"] == "present":
attrs_diff = attrs.diff(attrs_present)
if not attrs_diff:
module.exit_json(**results)
conti.attrs_put(attrs_diff)
- results['changed'] = True
+ results["changed"] = True
- elif module.params['state'] == 'absent':
+ elif module.params["state"] == "absent":
if not attrs_present:
module.exit_json(**results)
conti.attrs_delete(attrs_present)
- results['changed'] = True
+ results["changed"] = True
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
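
Editor's note: a minimal boto3 sketch of the calls the refactored ecs_attribute module wraps, for readers skimming this patch; the cluster name, instance id, and attribute values are illustrative placeholders, not values taken from the diff.

import boto3

ecs = boto3.client("ecs")

# Resolve the container-instance ARN for an EC2 instance, as _get_ecs_arn() does above.
arns = ecs.list_container_instances(cluster="test-cluster")["containerInstanceArns"]
instances = ecs.describe_container_instances(
    cluster="test-cluster", containerInstances=arns
)["containerInstances"]
ecs_arn = next(
    inst for inst in instances if inst["ec2InstanceId"] == "i-0123456789abcdef0"
)["containerInstanceArn"]

# state=present: put one attribute on the instance (the attrs_put path above).
ecs.put_attributes(
    cluster="test-cluster",
    attributes=[
        {"targetType": "container-instance", "targetId": ecs_arn, "name": "flavor", "value": "test"}
    ],
)

# state=absent: delete it again; the value is omitted (the attrs_delete path above).
ecs.delete_attributes(
    cluster="test-cluster",
    attributes=[{"targetType": "container-instance", "targetId": ecs_arn, "name": "flavor"}],
)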
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_cluster.py b/ansible_collections/community/aws/plugins/modules/ecs_cluster.py
index 347e2173e..7d427a58d 100644
--- a/ansible_collections/community/aws/plugins/modules/ecs_cluster.py
+++ b/ansible_collections/community/aws/plugins/modules/ecs_cluster.py
@@ -1,22 +1,21 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ecs_cluster
version_added: 1.0.0
short_description: Create or terminate ECS clusters.
notes:
- - When deleting a cluster, the information returned is the state of the cluster prior to deletion.
- - It will also wait for a cluster to have instances registered to it.
+ - When deleting a cluster, the information returned is the state of the cluster prior to deletion.
+ - It will also wait for a cluster to have instances registered to it.
description:
- - Creates or terminates ecs clusters.
-author: Mark Chance (@Java1Guy)
+  - Creates or terminates ECS clusters.
+author:
+ - Mark Chance (@Java1Guy)
options:
state:
description:
@@ -78,13 +77,12 @@ options:
type: bool
default: false
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Cluster creation
@@ -105,7 +103,7 @@ EXAMPLES = '''
weight: 1
- capacity_provider: FARGATE_SPOT
weight: 100
- purge_capacity_providers: True
+ purge_capacity_providers: true
- name: Cluster deletion
community.aws.ecs_cluster:
@@ -119,9 +117,9 @@ EXAMPLES = '''
delay: 10
repeat: 10
register: task_output
+"""
-'''
-RETURN = '''
+RETURN = r"""
activeServicesCount:
description: how many services are active in this cluster
returned: 0 if a new cluster
@@ -163,7 +161,7 @@ status:
returned: always
type: str
sample: ACTIVE
-'''
+"""
import time
@@ -172,9 +170,10 @@ try:
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class EcsClusterManager:
@@ -183,76 +182,75 @@ class EcsClusterManager:
def __init__(self, module):
self.module = module
try:
- self.ecs = module.client('ecs')
+ self.ecs = module.client("ecs")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
+ def find_in_array(self, array_of_clusters, cluster_name, field_name="clusterArn"):
for c in array_of_clusters:
if c[field_name].endswith(cluster_name):
return c
return None
def describe_cluster(self, cluster_name):
- response = self.ecs.describe_clusters(clusters=[
- cluster_name
- ])
- if len(response['failures']) > 0:
- c = self.find_in_array(response['failures'], cluster_name, 'arn')
- if c and c['reason'] == 'MISSING':
+ response = self.ecs.describe_clusters(clusters=[cluster_name])
+ if len(response["failures"]) > 0:
+ c = self.find_in_array(response["failures"], cluster_name, "arn")
+ if c and c["reason"] == "MISSING":
return None
# fall thru and look through found ones
- if len(response['clusters']) > 0:
- c = self.find_in_array(response['clusters'], cluster_name)
+ if len(response["clusters"]) > 0:
+ c = self.find_in_array(response["clusters"], cluster_name)
if c:
return c
- raise Exception("Unknown problem describing cluster %s." % cluster_name)
+ raise Exception(f"Unknown problem describing cluster {cluster_name}.")
def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy):
params = dict(clusterName=cluster_name)
if capacity_providers:
- params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers)
+ params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers)
if capacity_provider_strategy:
- params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy)
+ params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy)
response = self.ecs.create_cluster(**params)
- return response['cluster']
+ return response["cluster"]
def update_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy):
params = dict(cluster=cluster_name)
if capacity_providers:
- params['capacityProviders'] = snake_dict_to_camel_dict(capacity_providers)
+ params["capacityProviders"] = snake_dict_to_camel_dict(capacity_providers)
else:
- params['capacityProviders'] = []
+ params["capacityProviders"] = []
if capacity_provider_strategy:
- params['defaultCapacityProviderStrategy'] = snake_dict_to_camel_dict(capacity_provider_strategy)
+ params["defaultCapacityProviderStrategy"] = snake_dict_to_camel_dict(capacity_provider_strategy)
else:
- params['defaultCapacityProviderStrategy'] = []
+ params["defaultCapacityProviderStrategy"] = []
response = self.ecs.put_cluster_capacity_providers(**params)
- return response['cluster']
+ return response["cluster"]
def delete_cluster(self, clusterName):
return self.ecs.delete_cluster(cluster=clusterName)
def main():
-
argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent', 'has_instances']),
- name=dict(required=True, type='str'),
- delay=dict(required=False, type='int', default=10),
- repeat=dict(required=False, type='int', default=10),
- purge_capacity_providers=dict(required=False, type='bool', default=False),
- capacity_providers=dict(required=False, type='list', elements='str'),
- capacity_provider_strategy=dict(required=False,
- type='list',
- elements='dict',
- options=dict(capacity_provider=dict(type='str'),
- weight=dict(type='int'),
- base=dict(type='int', default=0)
- )
- ),
+ state=dict(required=True, choices=["present", "absent", "has_instances"]),
+ name=dict(required=True, type="str"),
+ delay=dict(required=False, type="int", default=10),
+ repeat=dict(required=False, type="int", default=10),
+ purge_capacity_providers=dict(required=False, type="bool", default=False),
+ capacity_providers=dict(required=False, type="list", elements="str"),
+ capacity_provider_strategy=dict(
+ required=False,
+ type="list",
+ elements="dict",
+ options=dict(
+ capacity_provider=dict(type="str"),
+ weight=dict(type="int"),
+ base=dict(type="int", default=0),
+ ),
+ ),
)
- required_together = [['state', 'name']]
+ required_together = [["state", "name"]]
module = AnsibleAWSModule(
argument_spec=argument_spec,
@@ -262,19 +260,19 @@ def main():
cluster_mgr = EcsClusterManager(module)
try:
- existing = cluster_mgr.describe_cluster(module.params['name'])
+ existing = cluster_mgr.describe_cluster(module.params["name"])
except Exception as e:
- module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))
+ module.fail_json(msg="Exception describing cluster '" + module.params["name"] + "': " + str(e))
results = dict(changed=False)
- if module.params['state'] == 'present':
+ if module.params["state"] == "present":
# Pull requested and existing capacity providers and strategies.
- purge_capacity_providers = module.params['purge_capacity_providers']
- requested_cp = module.params['capacity_providers']
- requested_cps = module.params['capacity_provider_strategy']
- if existing and 'status' in existing and existing['status'] == "ACTIVE":
- existing_cp = existing['capacityProviders']
- existing_cps = existing['defaultCapacityProviderStrategy']
+ purge_capacity_providers = module.params["purge_capacity_providers"]
+ requested_cp = module.params["capacity_providers"]
+ requested_cps = module.params["capacity_provider_strategy"]
+ if existing and "status" in existing and existing["status"] == "ACTIVE":
+ existing_cp = existing["capacityProviders"]
+ existing_cps = existing["defaultCapacityProviderStrategy"]
if requested_cp is None:
requested_cp = []
@@ -293,9 +291,12 @@ def main():
# Unless purge_capacity_providers is true, we will not be updating the providers or strategy.
if not purge_capacity_providers:
- module.deprecate('After 2024-06-01 the default value of purge_capacity_providers will change from false to true.'
- ' To maintain the existing behaviour explicitly set purge_capacity_providers=true',
- date='2024-06-01', collection_name='community.aws')
+ module.deprecate(
+ "After 2024-06-01 the default value of purge_capacity_providers will change from false to true."
+ " To maintain the existing behaviour explicitly set purge_capacity_providers=true",
+ date="2024-06-01",
+ collection_name="community.aws",
+ )
cps_update_needed = False
requested_cp = existing_cp
requested_cps = existing_cps
@@ -303,57 +304,67 @@ def main():
# If either the providers or strategy differ, update the cluster.
if requested_cp != existing_cp or cps_update_needed:
if not module.check_mode:
- results['cluster'] = cluster_mgr.update_cluster(cluster_name=module.params['name'],
- capacity_providers=requested_cp,
- capacity_provider_strategy=requested_cps)
- results['changed'] = True
+ results["cluster"] = cluster_mgr.update_cluster(
+ cluster_name=module.params["name"],
+ capacity_providers=requested_cp,
+ capacity_provider_strategy=requested_cps,
+ )
+ results["changed"] = True
else:
- results['cluster'] = existing
+ results["cluster"] = existing
else:
if not module.check_mode:
# doesn't exist. create it.
- results['cluster'] = cluster_mgr.create_cluster(cluster_name=module.params['name'],
- capacity_providers=requested_cp,
- capacity_provider_strategy=requested_cps)
- results['changed'] = True
+ results["cluster"] = cluster_mgr.create_cluster(
+ cluster_name=module.params["name"],
+ capacity_providers=requested_cp,
+ capacity_provider_strategy=requested_cps,
+ )
+ results["changed"] = True
# delete the cluster
- elif module.params['state'] == 'absent':
+ elif module.params["state"] == "absent":
if not existing:
pass
else:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
- results['cluster'] = existing
- if 'status' in existing and existing['status'] == "INACTIVE":
- results['changed'] = False
+ results["cluster"] = existing
+ if "status" in existing and existing["status"] == "INACTIVE":
+ results["changed"] = False
else:
if not module.check_mode:
- cluster_mgr.delete_cluster(module.params['name'])
- results['changed'] = True
- elif module.params['state'] == 'has_instances':
+ cluster_mgr.delete_cluster(module.params["name"])
+ results["changed"] = True
+ elif module.params["state"] == "has_instances":
if not existing:
- module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
+ module.fail_json(msg="Cluster '" + module.params["name"] + " not found.")
return
        # Cluster exists; poll until at least one container instance has
        # registered with it, or fail after the configured retries.
- delay = module.params['delay']
- repeat = module.params['repeat']
+ delay = module.params["delay"]
+ repeat = module.params["repeat"]
time.sleep(delay)
count = 0
for i in range(repeat):
- existing = cluster_mgr.describe_cluster(module.params['name'])
- count = existing['registeredContainerInstancesCount']
+ existing = cluster_mgr.describe_cluster(module.params["name"])
+ count = existing["registeredContainerInstancesCount"]
if count > 0:
- results['changed'] = True
+ results["changed"] = True
break
time.sleep(delay)
            if count == 0 and i == repeat - 1:
- module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
+ module.fail_json(
+ msg="Cluster instance count still zero after "
+ + str(repeat)
+ + " tries of "
+ + str(delay)
+ + " seconds each."
+ )
return
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
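
As a companion to the refactor above, a short boto3 sketch of the cluster lifecycle ecs_cluster manages; the cluster name and capacity providers are illustrative placeholders, not values from this patch.

import boto3

ecs = boto3.client("ecs")

strategy = [
    {"capacityProvider": "FARGATE", "weight": 1, "base": 0},
    {"capacityProvider": "FARGATE_SPOT", "weight": 100, "base": 0},
]

# create_cluster() path: providers and a default strategy set at creation.
ecs.create_cluster(
    clusterName="test-cluster",
    capacityProviders=["FARGATE", "FARGATE_SPOT"],
    defaultCapacityProviderStrategy=strategy,
)

# update_cluster() path: put_cluster_capacity_providers() replaces both
# lists wholesale, which is why the module sends [] when purging.
ecs.put_cluster_capacity_providers(
    cluster="test-cluster",
    capacityProviders=["FARGATE", "FARGATE_SPOT"],
    defaultCapacityProviderStrategy=strategy,
)

# state=absent path.
ecs.delete_cluster(cluster="test-cluster")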
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_ecr.py b/ansible_collections/community/aws/plugins/modules/ecs_ecr.py
index d83d5af2e..545b82742 100644
--- a/ansible_collections/community/aws/plugins/modules/ecs_ecr.py
+++ b/ansible_collections/community/aws/plugins/modules/ecs_ecr.py
@@ -1,15 +1,10 @@
#!/usr/bin/python
-# -*- coding: utf-8 -*
+# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ecs_ecr
version_added: 1.0.0
@@ -104,15 +99,14 @@ options:
type: dict
version_added: 5.2.0
author:
- - David M. Lee (@leedm777)
+ - David M. Lee (@leedm777)
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# If the repository does not exist, it is created. If it does exist, this
# does not affect any policies already on it.
- name: ecr-repo
@@ -186,9 +180,9 @@ EXAMPLES = '''
encryption_configuration:
encryption_type: KMS
kms_key: custom-kms-key-alias
-'''
+"""
-RETURN = '''
+RETURN = r"""
state:
type: str
description: The asserted state of the repository (present, absent)
@@ -216,7 +210,7 @@ repository:
repositoryArn: arn:aws:ecr:us-east-1:123456789012:repository/ecr-test-1484664090
repositoryName: ecr-test-1484664090
repositoryUri: 123456789012.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090
-'''
+"""
import json
import traceback
@@ -229,11 +223,11 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from ansible.module_utils.six import string_types
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import sort_json_policy_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto_exception
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def build_kwargs(registry_id):
@@ -251,45 +245,45 @@ def build_kwargs(registry_id):
class EcsEcr:
def __init__(self, module):
- self.ecr = module.client('ecr')
- self.sts = module.client('sts')
+ self.ecr = module.client("ecr")
+ self.sts = module.client("sts")
self.check_mode = module.check_mode
self.changed = False
self.skipped = False
def get_repository(self, registry_id, name):
try:
- res = self.ecr.describe_repositories(
- repositoryNames=[name], **build_kwargs(registry_id))
- repos = res.get('repositories')
+ res = self.ecr.describe_repositories(repositoryNames=[name], **build_kwargs(registry_id))
+ repos = res.get("repositories")
return repos and repos[0]
- except is_boto3_error_code('RepositoryNotFoundException'):
+ except is_boto3_error_code("RepositoryNotFoundException"):
return None
def get_repository_policy(self, registry_id, name):
try:
- res = self.ecr.get_repository_policy(
- repositoryName=name, **build_kwargs(registry_id))
- text = res.get('policyText')
+ res = self.ecr.get_repository_policy(repositoryName=name, **build_kwargs(registry_id))
+ text = res.get("policyText")
return text and json.loads(text)
- except is_boto3_error_code(['RepositoryNotFoundException', 'RepositoryPolicyNotFoundException']):
+ except is_boto3_error_code(["RepositoryNotFoundException", "RepositoryPolicyNotFoundException"]):
return None
def create_repository(self, registry_id, name, image_tag_mutability, encryption_configuration):
if registry_id:
- default_registry_id = self.sts.get_caller_identity().get('Account')
+ default_registry_id = self.sts.get_caller_identity().get("Account")
if registry_id != default_registry_id:
- raise Exception('Cannot create repository in registry {0}.'
- 'Would be created in {1} instead.'.format(registry_id, default_registry_id))
+ raise Exception(
+ f"Cannot create repository in registry {registry_id}. Would be created in {default_registry_id} instead."
+ )
if encryption_configuration is None:
- encryption_configuration = dict(encryptionType='AES256')
+ encryption_configuration = dict(encryptionType="AES256")
if not self.check_mode:
repo = self.ecr.create_repository(
repositoryName=name,
imageTagMutability=image_tag_mutability,
- encryptionConfiguration=encryption_configuration).get('repository')
+ encryptionConfiguration=encryption_configuration,
+ ).get("repository")
self.changed = True
return repo
else:
@@ -299,10 +293,8 @@ class EcsEcr:
def set_repository_policy(self, registry_id, name, policy_text, force):
if not self.check_mode:
policy = self.ecr.set_repository_policy(
- repositoryName=name,
- policyText=policy_text,
- force=force,
- **build_kwargs(registry_id))
+ repositoryName=name, policyText=policy_text, force=force, **build_kwargs(registry_id)
+ )
self.changed = True
return policy
else:
@@ -310,15 +302,13 @@ class EcsEcr:
if self.get_repository(registry_id, name) is None:
printable = name
if registry_id:
- printable = '{0}:{1}'.format(registry_id, name)
- raise Exception(
- 'could not find repository {0}'.format(printable))
+ printable = f"{registry_id}:{name}"
+ raise Exception(f"could not find repository {printable}")
return
def delete_repository(self, registry_id, name, force):
if not self.check_mode:
- repo = self.ecr.delete_repository(
- repositoryName=name, force=force, **build_kwargs(registry_id))
+ repo = self.ecr.delete_repository(repositoryName=name, force=force, **build_kwargs(registry_id))
self.changed = True
return repo
else:
@@ -330,8 +320,7 @@ class EcsEcr:
def delete_repository_policy(self, registry_id, name):
if not self.check_mode:
- policy = self.ecr.delete_repository_policy(
- repositoryName=name, **build_kwargs(registry_id))
+ policy = self.ecr.delete_repository_policy(repositoryName=name, **build_kwargs(registry_id))
self.changed = True
return policy
else:
@@ -343,36 +332,33 @@ class EcsEcr:
def put_image_tag_mutability(self, registry_id, name, new_mutability_configuration):
repo = self.get_repository(registry_id, name)
- current_mutability_configuration = repo.get('imageTagMutability')
+ current_mutability_configuration = repo.get("imageTagMutability")
if current_mutability_configuration != new_mutability_configuration:
if not self.check_mode:
self.ecr.put_image_tag_mutability(
- repositoryName=name,
- imageTagMutability=new_mutability_configuration,
- **build_kwargs(registry_id))
+ repositoryName=name, imageTagMutability=new_mutability_configuration, **build_kwargs(registry_id)
+ )
else:
self.skipped = True
self.changed = True
- repo['imageTagMutability'] = new_mutability_configuration
+ repo["imageTagMutability"] = new_mutability_configuration
return repo
def get_lifecycle_policy(self, registry_id, name):
try:
- res = self.ecr.get_lifecycle_policy(
- repositoryName=name, **build_kwargs(registry_id))
- text = res.get('lifecyclePolicyText')
+ res = self.ecr.get_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id))
+ text = res.get("lifecyclePolicyText")
return text and json.loads(text)
- except is_boto3_error_code(['LifecyclePolicyNotFoundException', 'RepositoryNotFoundException']):
+ except is_boto3_error_code(["LifecyclePolicyNotFoundException", "RepositoryNotFoundException"]):
return None
def put_lifecycle_policy(self, registry_id, name, policy_text):
if not self.check_mode:
policy = self.ecr.put_lifecycle_policy(
- repositoryName=name,
- lifecyclePolicyText=policy_text,
- **build_kwargs(registry_id))
+ repositoryName=name, lifecyclePolicyText=policy_text, **build_kwargs(registry_id)
+ )
self.changed = True
return policy
else:
@@ -380,15 +366,13 @@ class EcsEcr:
if self.get_repository(registry_id, name) is None:
printable = name
if registry_id:
- printable = '{0}:{1}'.format(registry_id, name)
- raise Exception(
- 'could not find repository {0}'.format(printable))
+ printable = f"{registry_id}:{name}"
+ raise Exception(f"could not find repository {printable}")
return
def purge_lifecycle_policy(self, registry_id, name):
if not self.check_mode:
- policy = self.ecr.delete_lifecycle_policy(
- repositoryName=name, **build_kwargs(registry_id))
+ policy = self.ecr.delete_lifecycle_policy(repositoryName=name, **build_kwargs(registry_id))
self.changed = True
return policy
else:
@@ -402,14 +386,11 @@ class EcsEcr:
if not self.check_mode:
if registry_id:
scan = self.ecr.put_image_scanning_configuration(
- registryId=registry_id,
- repositoryName=name,
- imageScanningConfiguration={'scanOnPush': scan_on_push}
+ registryId=registry_id, repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push}
)
else:
scan = self.ecr.put_image_scanning_configuration(
- repositoryName=name,
- imageScanningConfiguration={'scanOnPush': scan_on_push}
+ repositoryName=name, imageScanningConfiguration={"scanOnPush": scan_on_push}
)
self.changed = True
return scan
@@ -419,11 +400,11 @@ class EcsEcr:
def sort_lists_of_strings(policy):
- for statement_index in range(0, len(policy.get('Statement', []))):
- for key in policy['Statement'][statement_index]:
- value = policy['Statement'][statement_index][key]
+ for statement_index in range(0, len(policy.get("Statement", []))):
+ for key in policy["Statement"][statement_index]:
+ value = policy["Statement"][statement_index][key]
if isinstance(value, list) and all(isinstance(item, string_types) for item in value):
- policy['Statement'][statement_index][key] = sorted(value)
+ policy["Statement"][statement_index][key] = sorted(value)
return policy
@@ -431,151 +412,138 @@ def run(ecr, params):
    # type: (EcsEcr, dict) -> Tuple[bool, dict]
result = {}
try:
- name = params['name']
- state = params['state']
- policy_text = params['policy']
- purge_policy = params['purge_policy']
- force_absent = params['force_absent']
- registry_id = params['registry_id']
- force_set_policy = params['force_set_policy']
- image_tag_mutability = params['image_tag_mutability'].upper()
- lifecycle_policy_text = params['lifecycle_policy']
- purge_lifecycle_policy = params['purge_lifecycle_policy']
- scan_on_push = params['scan_on_push']
- encryption_configuration = snake_dict_to_camel_dict(params['encryption_configuration'])
+ name = params["name"]
+ state = params["state"]
+ policy_text = params["policy"]
+ purge_policy = params["purge_policy"]
+ force_absent = params["force_absent"]
+ registry_id = params["registry_id"]
+ force_set_policy = params["force_set_policy"]
+ image_tag_mutability = params["image_tag_mutability"].upper()
+ lifecycle_policy_text = params["lifecycle_policy"]
+ purge_lifecycle_policy = params["purge_lifecycle_policy"]
+ scan_on_push = params["scan_on_push"]
+ encryption_configuration = snake_dict_to_camel_dict(params["encryption_configuration"])
# Parse policies, if they are given
try:
policy = policy_text and json.loads(policy_text)
except ValueError:
- result['policy'] = policy_text
- result['msg'] = 'Could not parse policy'
+ result["policy"] = policy_text
+ result["msg"] = "Could not parse policy"
return False, result
try:
- lifecycle_policy = \
- lifecycle_policy_text and json.loads(lifecycle_policy_text)
+ lifecycle_policy = lifecycle_policy_text and json.loads(lifecycle_policy_text)
except ValueError:
- result['lifecycle_policy'] = lifecycle_policy_text
- result['msg'] = 'Could not parse lifecycle_policy'
+ result["lifecycle_policy"] = lifecycle_policy_text
+ result["msg"] = "Could not parse lifecycle_policy"
return False, result
- result['state'] = state
- result['created'] = False
+ result["state"] = state
+ result["created"] = False
repo = ecr.get_repository(registry_id, name)
- if state == 'present':
- result['created'] = False
+ if state == "present":
+ result["created"] = False
if not repo:
- repo = ecr.create_repository(
- registry_id, name, image_tag_mutability, encryption_configuration)
- result['changed'] = True
- result['created'] = True
+ repo = ecr.create_repository(registry_id, name, image_tag_mutability, encryption_configuration)
+ result["changed"] = True
+ result["created"] = True
else:
if encryption_configuration is not None:
- if repo.get('encryptionConfiguration') != encryption_configuration:
- result['msg'] = 'Cannot modify repository encryption type'
+ if repo.get("encryptionConfiguration") != encryption_configuration:
+ result["msg"] = "Cannot modify repository encryption type"
return False, result
repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability)
- result['repository'] = repo
+ result["repository"] = repo
if purge_lifecycle_policy:
- original_lifecycle_policy = \
- ecr.get_lifecycle_policy(registry_id, name)
+ original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name)
- result['lifecycle_policy'] = None
+ result["lifecycle_policy"] = None
if original_lifecycle_policy:
ecr.purge_lifecycle_policy(registry_id, name)
- result['changed'] = True
+ result["changed"] = True
elif lifecycle_policy_text is not None:
try:
- lifecycle_policy = sort_json_policy_dict(lifecycle_policy)
- result['lifecycle_policy'] = lifecycle_policy
+ result["lifecycle_policy"] = lifecycle_policy
+ original_lifecycle_policy = ecr.get_lifecycle_policy(registry_id, name)
- original_lifecycle_policy = ecr.get_lifecycle_policy(
- registry_id, name)
-
- if original_lifecycle_policy:
- original_lifecycle_policy = sort_json_policy_dict(
- original_lifecycle_policy)
-
- if original_lifecycle_policy != lifecycle_policy:
- ecr.put_lifecycle_policy(registry_id, name,
- lifecycle_policy_text)
- result['changed'] = True
+ if compare_policies(original_lifecycle_policy, lifecycle_policy):
+ ecr.put_lifecycle_policy(registry_id, name, lifecycle_policy_text)
+ result["changed"] = True
except Exception:
# Some failure w/ the policy. It's helpful to know what the
# policy is.
- result['lifecycle_policy'] = lifecycle_policy_text
+ result["lifecycle_policy"] = lifecycle_policy_text
raise
if purge_policy:
original_policy = ecr.get_repository_policy(registry_id, name)
- result['policy'] = None
+ result["policy"] = None
if original_policy:
ecr.delete_repository_policy(registry_id, name)
- result['changed'] = True
+ result["changed"] = True
elif policy_text is not None:
try:
# Sort any lists containing only string types
policy = sort_lists_of_strings(policy)
- result['policy'] = policy
+ result["policy"] = policy
- original_policy = ecr.get_repository_policy(
- registry_id, name)
+ original_policy = ecr.get_repository_policy(registry_id, name)
if original_policy:
original_policy = sort_lists_of_strings(original_policy)
if compare_policies(original_policy, policy):
- ecr.set_repository_policy(
- registry_id, name, policy_text, force_set_policy)
- result['changed'] = True
+ ecr.set_repository_policy(registry_id, name, policy_text, force_set_policy)
+ result["changed"] = True
except Exception:
# Some failure w/ the policy. It's helpful to know what the
# policy is.
- result['policy'] = policy_text
+ result["policy"] = policy_text
raise
else:
original_policy = ecr.get_repository_policy(registry_id, name)
if original_policy:
- result['policy'] = original_policy
+ result["policy"] = original_policy
original_scan_on_push = ecr.get_repository(registry_id, name)
if original_scan_on_push is not None:
- if scan_on_push != original_scan_on_push['imageScanningConfiguration']['scanOnPush']:
- result['changed'] = True
- result['repository']['imageScanningConfiguration']['scanOnPush'] = scan_on_push
+ if scan_on_push != original_scan_on_push["imageScanningConfiguration"]["scanOnPush"]:
+ result["changed"] = True
+ result["repository"]["imageScanningConfiguration"]["scanOnPush"] = scan_on_push
response = ecr.put_image_scanning_configuration(registry_id, name, scan_on_push)
- elif state == 'absent':
- result['name'] = name
+ elif state == "absent":
+ result["name"] = name
if repo:
ecr.delete_repository(registry_id, name, force_absent)
- result['changed'] = True
+ result["changed"] = True
except Exception as err:
msg = str(err)
if isinstance(err, botocore.exceptions.ClientError):
msg = boto_exception(err)
- result['msg'] = msg
- result['exception'] = traceback.format_exc()
+ result["msg"] = msg
+ result["exception"] = traceback.format_exc()
return False, result
if ecr.skipped:
- result['skipped'] = True
+ result["skipped"] = True
if ecr.changed:
- result['changed'] = True
+ result["changed"] = True
return True, result
@@ -584,34 +552,37 @@ def main():
argument_spec = dict(
name=dict(required=True),
registry_id=dict(required=False),
- state=dict(required=False, choices=['present', 'absent'],
- default='present'),
- force_absent=dict(required=False, type='bool', default=False),
- force_set_policy=dict(required=False, type='bool', default=False),
- policy=dict(required=False, type='json'),
- image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'],
- default='mutable'),
- purge_policy=dict(required=False, type='bool'),
- lifecycle_policy=dict(required=False, type='json'),
- purge_lifecycle_policy=dict(required=False, type='bool'),
- scan_on_push=(dict(required=False, type='bool', default=False)),
+ state=dict(required=False, choices=["present", "absent"], default="present"),
+ force_absent=dict(required=False, type="bool", default=False),
+ force_set_policy=dict(required=False, type="bool", default=False),
+ policy=dict(required=False, type="json"),
+ image_tag_mutability=dict(required=False, choices=["mutable", "immutable"], default="mutable"),
+ purge_policy=dict(required=False, type="bool"),
+ lifecycle_policy=dict(required=False, type="json"),
+ purge_lifecycle_policy=dict(required=False, type="bool"),
+ scan_on_push=(dict(required=False, type="bool", default=False)),
encryption_configuration=dict(
required=False,
- type='dict',
+ type="dict",
options=dict(
- encryption_type=dict(required=False, type='str', default='AES256', choices=['AES256', 'KMS']),
- kms_key=dict(required=False, type='str', no_log=False),
+ encryption_type=dict(required=False, type="str", default="AES256", choices=["AES256", "KMS"]),
+ kms_key=dict(required=False, type="str", no_log=False),
),
required_if=[
- ['encryption_type', 'KMS', ['kms_key']],
+ ["encryption_type", "KMS", ["kms_key"]],
],
),
)
mutually_exclusive = [
- ['policy', 'purge_policy'],
- ['lifecycle_policy', 'purge_lifecycle_policy']]
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive)
+ ["policy", "purge_policy"],
+ ["lifecycle_policy", "purge_lifecycle_policy"],
+ ]
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ )
ecr = EcsEcr(module)
passed, result = run(ecr, module.params)
@@ -622,5 +593,5 @@ def main():
module.fail_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
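
A minimal boto3 sketch of the repository and policy handling refactored above; the repository name is an illustrative placeholder, and the exception class comes from botocore's generated client exceptions.

import json

import boto3

ecr = boto3.client("ecr")

# create_repository() path, with the AES256 default the module applies
# when no encryption_configuration is given.
repo = ecr.create_repository(
    repositoryName="ecr-test",
    imageTagMutability="MUTABLE",
    encryptionConfiguration={"encryptionType": "AES256"},
)["repository"]

# get_repository_policy() returns the policy as JSON text; a missing
# policy surfaces as a client error, handled above via is_boto3_error_code.
try:
    text = ecr.get_repository_policy(repositoryName="ecr-test")["policyText"]
    policy = json.loads(text)
except ecr.exceptions.RepositoryPolicyNotFoundException:
    policy = None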
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_service.py b/ansible_collections/community/aws/plugins/modules/ecs_service.py
index 2d86a6bd5..e832fa3b5 100644
--- a/ansible_collections/community/aws/plugins/modules/ecs_service.py
+++ b/ansible_collections/community/aws/plugins/modules/ecs_service.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# -*- coding: utf-8 -*-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ecs_service
version_added: 1.0.0
@@ -297,12 +296,12 @@ options:
required: false
version_added: 4.1.0
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- community.aws.ecs_service:
@@ -321,10 +320,10 @@ EXAMPLES = r'''
desired_count: 0
network_configuration:
subnets:
- - subnet-abcd1234
+ - subnet-abcd1234
security_groups:
- - sg-aaaa1111
- - my_security_group
+ - sg-aaaa1111
+ - my_security_group
# Simple example to delete
- community.aws.ecs_service:
@@ -358,8 +357,8 @@ EXAMPLES = r'''
desired_count: 3
deployment_configuration:
deployment_circuit_breaker:
- enable: True
- rollback: True
+ enable: true
+ rollback: true
# With capacity_provider_strategy (added in version 4.0)
- community.aws.ecs_service:
@@ -384,9 +383,9 @@ EXAMPLES = r'''
Firstname: jane
lastName: doe
propagate_tags: SERVICE
-'''
+"""
-RETURN = r'''
+RETURN = r"""
service:
description: Details of created service.
returned: when creating a service
@@ -678,31 +677,33 @@ ansible_facts:
returned: always
type: str
-'''
-import time
+"""
-DEPLOYMENT_CONTROLLER_TYPE_MAP = {
- 'type': 'str',
-}
+import time
-DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
- 'maximum_percent': 'int',
- 'minimum_healthy_percent': 'int',
- 'deployment_circuit_breaker': 'dict',
-}
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import map_complex_type
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import map_complex_type
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
+DEPLOYMENT_CONTROLLER_TYPE_MAP = {
+ "type": "str",
+}
+
+DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+ "maximum_percent": "int",
+ "minimum_healthy_percent": "int",
+ "deployment_circuit_breaker": "dict",
+}
class EcsServiceManager:
@@ -710,32 +711,32 @@ class EcsServiceManager:
def __init__(self, module):
self.module = module
- self.ecs = module.client('ecs')
- self.ec2 = module.client('ec2')
+ self.ecs = module.client("ecs")
+ self.ec2 = module.client("ec2")
def format_network_configuration(self, network_config):
result = dict()
- if network_config['subnets'] is not None:
- result['subnets'] = network_config['subnets']
+ if network_config["subnets"] is not None:
+ result["subnets"] = network_config["subnets"]
else:
self.module.fail_json(msg="Network configuration must include subnets")
- if network_config['security_groups'] is not None:
- groups = network_config['security_groups']
- if any(not sg.startswith('sg-') for sg in groups):
+ if network_config["security_groups"] is not None:
+ groups = network_config["security_groups"]
+ if any(not sg.startswith("sg-") for sg in groups):
try:
- vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
+ vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"]
groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't look up security groups")
- result['securityGroups'] = groups
- if network_config['assign_public_ip'] is not None:
- if network_config['assign_public_ip'] is True:
- result['assignPublicIp'] = "ENABLED"
+ result["securityGroups"] = groups
+ if network_config["assign_public_ip"] is not None:
+ if network_config["assign_public_ip"] is True:
+ result["assignPublicIp"] = "ENABLED"
else:
- result['assignPublicIp'] = "DISABLED"
+ result["assignPublicIp"] = "DISABLED"
return dict(awsvpcConfiguration=result)
- def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
+ def find_in_array(self, array_of_services, service_name, field_name="serviceArn"):
for c in array_of_services:
if c[field_name].endswith(service_name):
return c
@@ -745,42 +746,42 @@ class EcsServiceManager:
response = self.ecs.describe_services(
cluster=cluster_name,
services=[service_name],
- include=['TAGS'],
+ include=["TAGS"],
)
- msg = ''
+ msg = ""
- if len(response['failures']) > 0:
- c = self.find_in_array(response['failures'], service_name, 'arn')
- msg += ", failure reason is " + c['reason']
- if c and c['reason'] == 'MISSING':
+ if len(response["failures"]) > 0:
+ c = self.find_in_array(response["failures"], service_name, "arn")
+ msg += ", failure reason is " + c["reason"]
+ if c and c["reason"] == "MISSING":
return None
# fall thru and look through found ones
- if len(response['services']) > 0:
- c = self.find_in_array(response['services'], service_name)
+ if len(response["services"]) > 0:
+ c = self.find_in_array(response["services"], service_name)
if c:
return c
- raise Exception("Unknown problem describing service %s." % service_name)
+ raise Exception(f"Unknown problem describing service {service_name}.")
def is_matching_service(self, expected, existing):
# aws returns the arn of the task definition
# arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3
# but the user is just entering
# ansible-fargate-nginx:3
- if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]:
- if existing.get('deploymentController', {}).get('type', None) != 'CODE_DEPLOY':
+ if expected["task_definition"] != existing["taskDefinition"].split("/")[-1]:
+ if existing.get("deploymentController", {}).get("type", None) != "CODE_DEPLOY":
return False
- if expected.get('health_check_grace_period_seconds'):
- if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'):
+ if expected.get("health_check_grace_period_seconds"):
+ if expected.get("health_check_grace_period_seconds") != existing.get("healthCheckGracePeriodSeconds"):
return False
- if (expected['load_balancers'] or []) != existing['loadBalancers']:
+ if (expected["load_balancers"] or []) != existing["loadBalancers"]:
return False
- if (expected['propagate_tags'] or "NONE") != existing['propagateTags']:
+ if (expected["propagate_tags"] or "NONE") != existing["propagateTags"]:
return False
- if boto3_tag_list_to_ansible_dict(existing.get('tags', [])) != (expected['tags'] or {}):
+ if boto3_tag_list_to_ansible_dict(existing.get("tags", [])) != (expected["tags"] or {}):
return False
if (expected["enable_execute_command"] or False) != existing.get("enableExecuteCommand", False):
@@ -788,8 +789,8 @@ class EcsServiceManager:
# expected is params. DAEMON scheduling strategy returns desired count equal to
# number of instances running; don't check desired count if scheduling strat is daemon
- if (expected['scheduling_strategy'] != 'DAEMON'):
- if (expected['desired_count'] or 0) != existing['desiredCount']:
+ if expected["scheduling_strategy"] != "DAEMON":
+ if (expected["desired_count"] or 0) != existing["desiredCount"]:
return False
return True
@@ -818,7 +819,6 @@ class EcsServiceManager:
propagate_tags,
enable_execute_command,
):
-
params = dict(
cluster=cluster_name,
serviceName=service_name,
@@ -827,47 +827,49 @@ class EcsServiceManager:
clientToken=client_token,
role=role,
deploymentConfiguration=deployment_configuration,
- placementStrategy=placement_strategy
+ placementStrategy=placement_strategy,
)
if network_configuration:
- params['networkConfiguration'] = network_configuration
+ params["networkConfiguration"] = network_configuration
if deployment_controller:
- params['deploymentController'] = deployment_controller
+ params["deploymentController"] = deployment_controller
if launch_type:
- params['launchType'] = launch_type
+ params["launchType"] = launch_type
if platform_version:
- params['platformVersion'] = platform_version
+ params["platformVersion"] = platform_version
if self.health_check_setable(params) and health_check_grace_period_seconds is not None:
- params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
+ params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds
if service_registries:
- params['serviceRegistries'] = service_registries
+ params["serviceRegistries"] = service_registries
    # filter placement_constraints, keeping only entries whose value is not None
# use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation
if placement_constraints:
- params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None}
- for constraint in placement_constraints]
+ params["placementConstraints"] = [
+ {key: value for key, value in constraint.items() if value is not None}
+ for constraint in placement_constraints
+ ]
# desired count is not required if scheduling strategy is daemon
if desired_count is not None:
- params['desiredCount'] = desired_count
+ params["desiredCount"] = desired_count
if capacity_provider_strategy:
- params['capacityProviderStrategy'] = capacity_provider_strategy
+ params["capacityProviderStrategy"] = capacity_provider_strategy
if propagate_tags:
- params['propagateTags'] = propagate_tags
+ params["propagateTags"] = propagate_tags
# desired count is not required if scheduling strategy is daemon
if desired_count is not None:
- params['desiredCount'] = desired_count
+ params["desiredCount"] = desired_count
if tags:
- params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+ params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value")
if scheduling_strategy:
- params['schedulingStrategy'] = scheduling_strategy
+ params["schedulingStrategy"] = scheduling_strategy
if enable_execute_command:
params["enableExecuteCommand"] = enable_execute_command
response = self.ecs.create_service(**params)
- return self.jsonize(response['service'])
+ return self.jsonize(response["service"])
def update_service(
self,
@@ -891,242 +893,262 @@ class EcsServiceManager:
cluster=cluster_name,
service=service_name,
taskDefinition=task_definition,
- deploymentConfiguration=deployment_configuration)
+ deploymentConfiguration=deployment_configuration,
+ )
    # filter placement_constraints, keeping only entries whose value is not None
# use-case: `distinctInstance` type should never contain `expression`, but None will fail `str` type validation
if placement_constraints:
- params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None}
- for constraint in placement_constraints]
+ params["placementConstraints"] = [
+ {key: value for key, value in constraint.items() if value is not None}
+ for constraint in placement_constraints
+ ]
if purge_placement_constraints and not placement_constraints:
- params['placementConstraints'] = []
+ params["placementConstraints"] = []
if placement_strategy:
- params['placementStrategy'] = placement_strategy
+ params["placementStrategy"] = placement_strategy
if purge_placement_strategy and not placement_strategy:
- params['placementStrategy'] = []
+ params["placementStrategy"] = []
if network_configuration:
- params['networkConfiguration'] = network_configuration
+ params["networkConfiguration"] = network_configuration
if force_new_deployment:
- params['forceNewDeployment'] = force_new_deployment
+ params["forceNewDeployment"] = force_new_deployment
if capacity_provider_strategy:
- params['capacityProviderStrategy'] = capacity_provider_strategy
+ params["capacityProviderStrategy"] = capacity_provider_strategy
if health_check_grace_period_seconds is not None:
- params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
+ params["healthCheckGracePeriodSeconds"] = health_check_grace_period_seconds
# desired count is not required if scheduling strategy is daemon
if desired_count is not None:
- params['desiredCount'] = desired_count
+ params["desiredCount"] = desired_count
if enable_execute_command is not None:
params["enableExecuteCommand"] = enable_execute_command
if load_balancers:
- params['loadBalancers'] = load_balancers
+ params["loadBalancers"] = load_balancers
response = self.ecs.update_service(**params)
- return self.jsonize(response['service'])
+ return self.jsonize(response["service"])
def jsonize(self, service):
# some fields are datetime which is not JSON serializable
# make them strings
- if 'createdAt' in service:
- service['createdAt'] = str(service['createdAt'])
- if 'deployments' in service:
- for d in service['deployments']:
- if 'createdAt' in d:
- d['createdAt'] = str(d['createdAt'])
- if 'updatedAt' in d:
- d['updatedAt'] = str(d['updatedAt'])
- if 'events' in service:
- for e in service['events']:
- if 'createdAt' in e:
- e['createdAt'] = str(e['createdAt'])
+ if "createdAt" in service:
+ service["createdAt"] = str(service["createdAt"])
+ if "deployments" in service:
+ for d in service["deployments"]:
+ if "createdAt" in d:
+ d["createdAt"] = str(d["createdAt"])
+ if "updatedAt" in d:
+ d["updatedAt"] = str(d["updatedAt"])
+ if "events" in service:
+ for e in service["events"]:
+ if "createdAt" in e:
+ e["createdAt"] = str(e["createdAt"])
return service
def delete_service(self, service, cluster=None, force=False):
return self.ecs.delete_service(cluster=cluster, service=service, force=force)
def health_check_setable(self, params):
- load_balancers = params.get('loadBalancers', [])
+ load_balancers = params.get("loadBalancers", [])
return len(load_balancers) > 0
def main():
argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent', 'deleting']),
- name=dict(required=True, type='str', aliases=['service']),
- cluster=dict(required=False, type='str', default='default'),
- task_definition=dict(required=False, type='str'),
- load_balancers=dict(required=False, default=[], type='list', elements='dict'),
- desired_count=dict(required=False, type='int'),
- client_token=dict(required=False, default='', type='str', no_log=False),
- role=dict(required=False, default='', type='str'),
- delay=dict(required=False, type='int', default=10),
- repeat=dict(required=False, type='int', default=10),
- force_new_deployment=dict(required=False, default=False, type='bool'),
- force_deletion=dict(required=False, default=False, type='bool'),
- deployment_controller=dict(required=False, default={}, type='dict'),
- deployment_configuration=dict(required=False, default={}, type='dict'),
- wait=dict(required=False, default=False, type='bool'),
+ state=dict(required=True, choices=["present", "absent", "deleting"]),
+ name=dict(required=True, type="str", aliases=["service"]),
+ cluster=dict(required=False, type="str", default="default"),
+ task_definition=dict(required=False, type="str"),
+ load_balancers=dict(required=False, default=[], type="list", elements="dict"),
+ desired_count=dict(required=False, type="int"),
+ client_token=dict(required=False, default="", type="str", no_log=False),
+ role=dict(required=False, default="", type="str"),
+ delay=dict(required=False, type="int", default=10),
+ repeat=dict(required=False, type="int", default=10),
+ force_new_deployment=dict(required=False, default=False, type="bool"),
+ force_deletion=dict(required=False, default=False, type="bool"),
+ deployment_controller=dict(required=False, default={}, type="dict"),
+ deployment_configuration=dict(required=False, default={}, type="dict"),
+ wait=dict(required=False, default=False, type="bool"),
placement_constraints=dict(
required=False,
default=[],
- type='list',
- elements='dict',
- options=dict(
- type=dict(type='str'),
- expression=dict(required=False, type='str')
- )
+ type="list",
+ elements="dict",
+ options=dict(type=dict(type="str"), expression=dict(required=False, type="str")),
),
- purge_placement_constraints=dict(required=False, default=False, type='bool'),
+ purge_placement_constraints=dict(required=False, default=False, type="bool"),
placement_strategy=dict(
required=False,
default=[],
- type='list',
- elements='dict',
+ type="list",
+ elements="dict",
+ options=dict(
+ type=dict(type="str"),
+ field=dict(type="str"),
+ ),
+ ),
+ purge_placement_strategy=dict(required=False, default=False, type="bool"),
+ health_check_grace_period_seconds=dict(required=False, type="int"),
+ network_configuration=dict(
+ required=False,
+ type="dict",
options=dict(
- type=dict(type='str'),
- field=dict(type='str'),
- )
+ subnets=dict(type="list", elements="str"),
+ security_groups=dict(type="list", elements="str"),
+ assign_public_ip=dict(type="bool"),
+ ),
),
- purge_placement_strategy=dict(required=False, default=False, type='bool'),
- health_check_grace_period_seconds=dict(required=False, type='int'),
- network_configuration=dict(required=False, type='dict', options=dict(
- subnets=dict(type='list', elements='str'),
- security_groups=dict(type='list', elements='str'),
- assign_public_ip=dict(type='bool')
- )),
- launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
- platform_version=dict(required=False, type='str'),
- service_registries=dict(required=False, type='list', default=[], elements='dict'),
- scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']),
+ launch_type=dict(required=False, choices=["EC2", "FARGATE"]),
+ platform_version=dict(required=False, type="str"),
+ service_registries=dict(required=False, type="list", default=[], elements="dict"),
+ scheduling_strategy=dict(required=False, choices=["DAEMON", "REPLICA"]),
capacity_provider_strategy=dict(
required=False,
- type='list',
+ type="list",
default=[],
- elements='dict',
+ elements="dict",
options=dict(
- capacity_provider=dict(type='str'),
- weight=dict(type='int'),
- base=dict(type='int')
- )
+ capacity_provider=dict(type="str"),
+ weight=dict(type="int"),
+ base=dict(type="int"),
+ ),
),
propagate_tags=dict(required=False, choices=["TASK_DEFINITION", "SERVICE"]),
tags=dict(required=False, type="dict"),
enable_execute_command=dict(required=False, type="bool"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=[('launch_type', 'FARGATE', ['network_configuration'])],
- required_together=[['load_balancers', 'role']],
- mutually_exclusive=[['launch_type', 'capacity_provider_strategy']])
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[("launch_type", "FARGATE", ["network_configuration"])],
+ required_together=[["load_balancers", "role"]],
+ mutually_exclusive=[["launch_type", "capacity_provider_strategy"]],
+ )
- if module.params['state'] == 'present':
- if module.params['scheduling_strategy'] == 'REPLICA' and module.params['desired_count'] is None:
- module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count')
- if module.params['task_definition'] is None and not module.params['force_new_deployment']:
- module.fail_json(msg='Either task_definition or force_new_deployment is required when status is present.')
+ if module.params["state"] == "present":
+ if module.params["scheduling_strategy"] == "REPLICA" and module.params["desired_count"] is None:
+ module.fail_json(msg="state is present, scheduling_strategy is REPLICA; missing desired_count")
+ if module.params["task_definition"] is None and not module.params["force_new_deployment"]:
+ module.fail_json(msg="Either task_definition or force_new_deployment is required when status is present.")
- if len(module.params['capacity_provider_strategy']) > 6:
- module.fail_json(msg='AWS allows a maximum of six capacity providers in the strategy.')
+ if len(module.params["capacity_provider_strategy"]) > 6:
+ module.fail_json(msg="AWS allows a maximum of six capacity providers in the strategy.")
service_mgr = EcsServiceManager(module)
- if module.params['network_configuration']:
- network_configuration = service_mgr.format_network_configuration(module.params['network_configuration'])
+ if module.params["network_configuration"]:
+ network_configuration = service_mgr.format_network_configuration(module.params["network_configuration"])
else:
network_configuration = None
- deployment_controller = map_complex_type(module.params['deployment_controller'],
- DEPLOYMENT_CONTROLLER_TYPE_MAP)
+ deployment_controller = map_complex_type(module.params["deployment_controller"], DEPLOYMENT_CONTROLLER_TYPE_MAP)
deploymentController = snake_dict_to_camel_dict(deployment_controller)
- deployment_configuration = map_complex_type(module.params['deployment_configuration'],
- DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+ deployment_configuration = map_complex_type(
+ module.params["deployment_configuration"], DEPLOYMENT_CONFIGURATION_TYPE_MAP
+ )
deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)
- serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries']))
- capacityProviders = list(map(snake_dict_to_camel_dict, module.params['capacity_provider_strategy']))
+ serviceRegistries = list(map(snake_dict_to_camel_dict, module.params["service_registries"]))
+ capacityProviders = list(map(snake_dict_to_camel_dict, module.params["capacity_provider_strategy"]))
try:
- existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
+ existing = service_mgr.describe_service(module.params["cluster"], module.params["name"])
except Exception as e:
- module.fail_json_aws(e,
- msg="Exception describing service '{0}' in cluster '{1}'"
- .format(module.params['name'], module.params['cluster']))
+ module.fail_json_aws(
+ e,
+ msg=f"Exception describing service '{module.params['name']}' in cluster '{module.params['cluster']}'",
+ )
results = dict(changed=False)
- if module.params['state'] == 'present':
-
+ if module.params["state"] == "present":
matching = False
update = False
- if existing and 'status' in existing and existing['status'] == "ACTIVE":
- if module.params['force_new_deployment']:
+ if existing and "status" in existing and existing["status"] == "ACTIVE":
+ if module.params["force_new_deployment"]:
update = True
elif service_mgr.is_matching_service(module.params, existing):
matching = True
- results['service'] = existing
+ results["service"] = existing
else:
update = True
if not matching:
if not module.check_mode:
-
- role = module.params['role']
- clientToken = module.params['client_token']
+ role = module.params["role"]
+ clientToken = module.params["client_token"]
loadBalancers = []
- for loadBalancer in module.params['load_balancers']:
- if 'containerPort' in loadBalancer:
- loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
+ for loadBalancer in module.params["load_balancers"]:
+ if "containerPort" in loadBalancer:
+ loadBalancer["containerPort"] = int(loadBalancer["containerPort"])
loadBalancers.append(loadBalancer)
for loadBalancer in loadBalancers:
- if 'containerPort' in loadBalancer:
- loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
+ if "containerPort" in loadBalancer:
+ loadBalancer["containerPort"] = int(loadBalancer["containerPort"])
if update:
# check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for the feature
- if module.params['scheduling_strategy']:
- if (existing['schedulingStrategy']) != module.params['scheduling_strategy']:
- module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service")
-
- if module.params['service_registries']:
- if (existing['serviceRegistries'] or []) != serviceRegistries:
- module.fail_json(msg="It is not possible to update the service registries of an existing service")
- if module.params['capacity_provider_strategy']:
- if 'launchType' in existing.keys():
- module.fail_json(msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy.")
- if module.params['launch_type']:
- if 'capacityProviderStrategy' in existing.keys():
- module.fail_json(msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type.")
- if (existing['loadBalancers'] or []) != loadBalancers:
+ if module.params["scheduling_strategy"]:
+ if (existing["schedulingStrategy"]) != module.params["scheduling_strategy"]:
+ module.fail_json(
+ msg="It is not possible to update the scheduling strategy of an existing service"
+ )
+
+ if module.params["service_registries"]:
+ if (existing["serviceRegistries"] or []) != serviceRegistries:
+ module.fail_json(
+ msg="It is not possible to update the service registries of an existing service"
+ )
+ if module.params["capacity_provider_strategy"]:
+ if "launchType" in existing.keys():
+ module.fail_json(
+ msg="It is not possible to change an existing service from launch_type to capacity_provider_strategy."
+ )
+ if module.params["launch_type"]:
+ if "capacityProviderStrategy" in existing.keys():
+ module.fail_json(
+ msg="It is not possible to change an existing service from capacity_provider_strategy to launch_type."
+ )
+ if (existing["loadBalancers"] or []) != loadBalancers:
# fails if deployment type is not CODE_DEPLOY or ECS
- if existing['deploymentController']['type'] not in ['CODE_DEPLOY', 'ECS']:
- module.fail_json(msg="It is not possible to update the load balancers of an existing service")
+ if existing["deploymentController"]["type"] not in ["CODE_DEPLOY", "ECS"]:
+ module.fail_json(
+ msg="It is not possible to update the load balancers of an existing service"
+ )
- if existing.get('deploymentController', {}).get('type', None) == 'CODE_DEPLOY':
- task_definition = ''
+ if existing.get("deploymentController", {}).get("type", None) == "CODE_DEPLOY":
+ task_definition = ""
network_configuration = []
else:
- task_definition = module.params['task_definition']
+ task_definition = module.params["task_definition"]
- if module.params['propagate_tags'] and module.params['propagate_tags'] != existing['propagateTags']:
- module.fail_json(msg="It is not currently supported to enable propagation tags of an existing service")
+ if module.params["propagate_tags"] and module.params["propagate_tags"] != existing["propagateTags"]:
+ module.fail_json(
+ msg="It is not currently supported to enable propagation tags of an existing service"
+ )
- if module.params['tags'] and boto3_tag_list_to_ansible_dict(existing['tags']) != module.params['tags']:
+ if (
+ module.params["tags"]
+ and boto3_tag_list_to_ansible_dict(existing["tags"]) != module.params["tags"]
+ ):
module.fail_json(msg="It is not currently supported to change tags of an existing service")
- updatedLoadBalancers = loadBalancers if existing['deploymentController']['type'] == 'ECS' else []
+ updatedLoadBalancers = loadBalancers if existing["deploymentController"]["type"] == "ECS" else []
- if task_definition is None and module.params['force_new_deployment']:
- task_definition = existing['taskDefinition']
+ if task_definition is None and module.params["force_new_deployment"]:
+ task_definition = existing["taskDefinition"]
try:
# update required
@@ -1178,76 +1200,73 @@ def main():
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Couldn't create service")
- if response.get('tags', None):
- response['tags'] = boto3_tag_list_to_ansible_dict(response['tags'])
- results['service'] = response
+ if response.get("tags", None):
+ response["tags"] = boto3_tag_list_to_ansible_dict(response["tags"])
+ results["service"] = response
- results['changed'] = True
+ results["changed"] = True
- elif module.params['state'] == 'absent':
+ elif module.params["state"] == "absent":
if not existing:
pass
else:
# it exists, so we should delete it and mark changed.
# return info about the service deleted
- del existing['deployments']
- del existing['events']
- results['ansible_facts'] = existing
- if 'status' in existing and existing['status'] == "INACTIVE":
- results['changed'] = False
+ del existing["deployments"]
+ del existing["events"]
+ results["ansible_facts"] = existing
+ if "status" in existing and existing["status"] == "INACTIVE":
+ results["changed"] = False
else:
if not module.check_mode:
try:
service_mgr.delete_service(
- module.params['name'],
- module.params['cluster'],
- module.params['force_deletion'],
+ module.params["name"],
+ module.params["cluster"],
+ module.params["force_deletion"],
)
# Wait for service to be INACTIVE prior to exiting
- if module.params['wait']:
- waiter = service_mgr.ecs.get_waiter('services_inactive')
+ if module.params["wait"]:
+ waiter = service_mgr.ecs.get_waiter("services_inactive")
try:
waiter.wait(
- services=[module.params['name']],
- cluster=module.params['cluster'],
+ services=[module.params["name"]],
+ cluster=module.params["cluster"],
WaiterConfig={
- 'Delay': module.params['delay'],
- 'MaxAttempts': module.params['repeat']
- }
+ "Delay": module.params["delay"],
+ "MaxAttempts": module.params["repeat"],
+ },
)
except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, 'Timeout waiting for service removal')
+ module.fail_json_aws(e, "Timeout waiting for service removal")
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e, msg="Couldn't delete service")
- results['changed'] = True
+ results["changed"] = True
- elif module.params['state'] == 'deleting':
+ elif module.params["state"] == "deleting":
if not existing:
- module.fail_json(msg="Service '" + module.params['name'] + " not found.")
+ module.fail_json(msg="Service '" + module.params["name"] + " not found.")
return
# it exists, so poll until the service reports INACTIVE and mark changed.
- delay = module.params['delay']
- repeat = module.params['repeat']
+ delay = module.params["delay"]
+ repeat = module.params["repeat"]
time.sleep(delay)
for i in range(repeat):
- existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
- status = existing['status']
+ existing = service_mgr.describe_service(module.params["cluster"], module.params["name"])
+ status = existing["status"]
if status == "INACTIVE":
- results['changed'] = True
+ results["changed"] = True
break
time.sleep(delay)
if i == repeat - 1:
- module.fail_json(
- msg="Service still not deleted after {0} tries of {1} seconds each."
- .format(repeat, delay)
- )
+ module.fail_json(msg=f"Service still not deleted after {repeat} tries of {delay} seconds each.")
return
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
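A minimal standalone sketch of the cross-parameter validation performed in main() above, with a plain dict standing in for module.params and a hypothetical fail() helper in place of module.fail_json():

def validate_service_params(params, fail):
    # state=present requires either a task definition or a forced deployment,
    # and a REPLICA service must declare how many tasks it should keep running.
    if params["state"] == "present":
        if params["scheduling_strategy"] == "REPLICA" and params["desired_count"] is None:
            fail("state is present, scheduling_strategy is REPLICA; missing desired_count")
        if params["task_definition"] is None and not params["force_new_deployment"]:
            fail("Either task_definition or force_new_deployment is required when state is present.")
    # The module caps the capacity provider strategy at six entries.
    if len(params["capacity_provider_strategy"]) > 6:
        fail("AWS allows a maximum of six capacity providers in the strategy.")

def fail(msg):
    raise SystemExit(msg)

validate_service_params(
    {
        "state": "present",
        "scheduling_strategy": "REPLICA",
        "desired_count": 2,
        "task_definition": "web:3",
        "force_new_deployment": False,
        "capacity_provider_strategy": [],
    },
    fail,
)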
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_service_info.py b/ansible_collections/community/aws/plugins/modules/ecs_service_info.py
index f174a31cd..02a6abff2 100644
--- a/ansible_collections/community/aws/plugins/modules/ecs_service_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ecs_service_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ecs_service_info
version_added: 1.0.0
@@ -42,13 +40,12 @@ options:
elements: str
aliases: ['name']
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic listing example
@@ -62,9 +59,9 @@ EXAMPLES = r'''
- community.aws.ecs_service_info:
cluster: test-cluster
register: output
-'''
+"""
-RETURN = r'''
+RETURN = r"""
services:
description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
returned: success
@@ -132,16 +129,17 @@ services:
returned: when events is true
type: list
elements: dict
-''' # NOQA
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class EcsServiceManager:
@@ -149,14 +147,14 @@ class EcsServiceManager:
def __init__(self, module):
self.module = module
- self.ecs = module.client('ecs')
+ self.ecs = module.client("ecs")
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def list_services_with_backoff(self, **kwargs):
- paginator = self.ecs.get_paginator('list_services')
+ paginator = self.ecs.get_paginator("list_services")
try:
return paginator.paginate(**kwargs).build_full_result()
- except is_boto3_error_code('ClusterNotFoundException') as e:
+ except is_boto3_error_code("ClusterNotFoundException") as e:
self.module.fail_json_aws(e, "Could not find cluster to list services")
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
@@ -166,43 +164,43 @@ class EcsServiceManager:
def list_services(self, cluster):
fn_args = dict()
if cluster and cluster is not None:
- fn_args['cluster'] = cluster
+ fn_args["cluster"] = cluster
try:
response = self.list_services_with_backoff(**fn_args)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't list ECS services")
- relevant_response = dict(services=response['serviceArns'])
+ relevant_response = dict(services=response["serviceArns"])
return relevant_response
def describe_services(self, cluster, services):
fn_args = dict()
if cluster and cluster is not None:
- fn_args['cluster'] = cluster
- fn_args['services'] = services
+ fn_args["cluster"] = cluster
+ fn_args["services"] = services
try:
response = self.describe_services_with_backoff(**fn_args)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't describe ECS services")
- running_services = [self.extract_service_from(service) for service in response.get('services', [])]
- services_not_running = response.get('failures', [])
+ running_services = [self.extract_service_from(service) for service in response.get("services", [])]
+ services_not_running = response.get("failures", [])
return running_services, services_not_running
def extract_service_from(self, service):
# some fields are datetime which is not JSON serializable
# make them strings
- if 'deployments' in service:
- for d in service['deployments']:
- if 'createdAt' in d:
- d['createdAt'] = str(d['createdAt'])
- if 'updatedAt' in d:
- d['updatedAt'] = str(d['updatedAt'])
- if 'events' in service:
- if not self.module.params['events']:
- del service['events']
+ if "deployments" in service:
+ for d in service["deployments"]:
+ if "createdAt" in d:
+ d["createdAt"] = str(d["createdAt"])
+ if "updatedAt" in d:
+ d["updatedAt"] = str(d["updatedAt"])
+ if "events" in service:
+ if not self.module.params["events"]:
+ del service["events"]
else:
- for e in service['events']:
- if 'createdAt' in e:
- e['createdAt'] = str(e['createdAt'])
+ for e in service["events"]:
+ if "createdAt" in e:
+ e["createdAt"] = str(e["createdAt"])
return service
@@ -210,38 +208,37 @@ def chunks(l, n):
"""Yield successive n-sized chunks from l."""
""" https://stackoverflow.com/a/312464 """
for i in range(0, len(l), n):
- yield l[i:i + n]
+ yield l[i:i + n] # fmt: skip
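# A quick usage sketch for chunks() above: main() below batches service names
# in tens, matching the ten-service limit of the DescribeServices API call.
# (Standalone copy of the generator so the snippet runs on its own.)
def _chunks(l, n):
    for i in range(0, len(l), n):
        yield l[i:i + n]

assert [len(b) for b in _chunks([f"svc-{i}" for i in range(25)], 10)] == [10, 10, 5]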
def main():
-
argument_spec = dict(
- details=dict(type='bool', default=False),
- events=dict(type='bool', default=True),
+ details=dict(type="bool", default=False),
+ events=dict(type="bool", default=True),
cluster=dict(),
- service=dict(type='list', elements='str', aliases=['name'])
+ service=dict(type="list", elements="str", aliases=["name"]),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- show_details = module.params.get('details')
+ show_details = module.params.get("details")
task_mgr = EcsServiceManager(module)
if show_details:
- if module.params['service']:
- services = module.params['service']
+ if module.params["service"]:
+ services = module.params["service"]
else:
- services = task_mgr.list_services(module.params['cluster'])['services']
+ services = task_mgr.list_services(module.params["cluster"])["services"]
ecs_info = dict(services=[], services_not_running=[])
for chunk in chunks(services, 10):
- running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk)
- ecs_info['services'].extend(running_services)
- ecs_info['services_not_running'].extend(services_not_running)
+ running_services, services_not_running = task_mgr.describe_services(module.params["cluster"], chunk)
+ ecs_info["services"].extend(running_services)
+ ecs_info["services_not_running"].extend(services_not_running)
else:
- ecs_info = task_mgr.list_services(module.params['cluster'])
+ ecs_info = task_mgr.list_services(module.params["cluster"])
module.exit_json(changed=False, **ecs_info)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
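A side note on extract_service_from() above: boto3 returns datetime objects, which the JSON encoding used for module results cannot serialize, so the module stringifies createdAt/updatedAt. A minimal sketch of the same idea (the field names mirror the ECS response shape):

import json
from datetime import datetime, timezone

deployment = {"id": "ecs-svc/1234", "createdAt": datetime.now(timezone.utc)}

try:
    json.dumps(deployment)
except TypeError as exc:
    print(f"raw boto3 shape is not JSON serializable: {exc}")

deployment["createdAt"] = str(deployment["createdAt"])  # what the module does
print(json.dumps(deployment))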
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_tag.py b/ansible_collections/community/aws/plugins/modules/ecs_tag.py
index 8698a7bbd..dd09096ea 100644
--- a/ansible_collections/community/aws/plugins/modules/ecs_tag.py
+++ b/ansible_collections/community/aws/plugins/modules/ecs_tag.py
@@ -1,20 +1,17 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright: (c) 2019, Michael Pechner <mikey@mikey.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ecs_tag
version_added: 1.0.0
short_description: create and remove tags on Amazon ECS resources
-notes:
- - none
description:
- - Creates and removes tags for Amazon ECS resources.
- - Resources are referenced by their cluster name.
+ - Creates and removes tags for Amazon ECS resources.
+ - Resources are referenced by their cluster name.
author:
- Michael Pechner (@mpechner)
options:
@@ -53,13 +50,12 @@ options:
type: bool
default: false
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure tags are present on a resource
community.aws.ecs_tag:
cluster_name: mycluster
@@ -90,12 +86,12 @@ EXAMPLES = r'''
cluster_name: mycluster
resource_type: cluster
tags:
- Name: foo
+ Name: foo
state: absent
purge_tags: true
-'''
+"""
-RETURN = r'''
+RETURN = r"""
tags:
description: A dict containing the tags on the resource
returned: always
@@ -108,47 +104,49 @@ removed_tags:
description: A dict of tags that were removed from the resource
returned: If tags were removed
type: dict
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
+"""
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
- pass # Handled by AnsibleAWSModule
-__metaclass__ = type
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_tags(ecs, module, resource):
try:
- return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)['tags'])
+ return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)["tags"])
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
+ module.fail_json_aws(e, msg=f"Failed to fetch tags for resource {resource}")
def get_arn(ecs, module, cluster_name, resource_type, resource):
-
try:
- if resource_type == 'cluster':
+ if resource_type == "cluster":
description = ecs.describe_clusters(clusters=[resource])
- resource_arn = description['clusters'][0]['clusterArn']
- elif resource_type == 'task':
+ resource_arn = description["clusters"][0]["clusterArn"]
+ elif resource_type == "task":
description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource])
- resource_arn = description['tasks'][0]['taskArn']
- elif resource_type == 'service':
+ resource_arn = description["tasks"][0]["taskArn"]
+ elif resource_type == "service":
description = ecs.describe_services(cluster=cluster_name, services=[resource])
- resource_arn = description['services'][0]['serviceArn']
- elif resource_type == 'task_definition':
+ resource_arn = description["services"][0]["serviceArn"]
+ elif resource_type == "task_definition":
description = ecs.describe_task_definition(taskDefinition=resource)
- resource_arn = description['taskDefinition']['taskDefinitionArn']
- elif resource_type == 'container':
+ resource_arn = description["taskDefinition"]["taskDefinitionArn"]
+ elif resource_type == "container":
description = ecs.describe_container_instances(clusters=[resource])
- resource_arn = description['containerInstances'][0]['containerInstanceArn']
+ resource_arn = description["containerInstances"][0]["containerInstanceArn"]
except (IndexError, KeyError):
- module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
+ module.fail_json(msg=f"Failed to find {resource_type} {resource}")
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource))
+ module.fail_json_aws(e, msg=f"Failed to find {resource_type} {resource}")
return resource_arn
@@ -157,28 +155,28 @@ def main():
argument_spec = dict(
cluster_name=dict(required=True),
resource=dict(required=False),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent']),
- resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container'])
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=False),
+ state=dict(default="present", choices=["present", "absent"]),
+ resource_type=dict(default="cluster", choices=["cluster", "task", "service", "task_definition", "container"]),
)
- required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
+ required_if = [("state", "present", ["tags"]), ("state", "absent", ["tags"])]
module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
- resource_type = module.params['resource_type']
- cluster_name = module.params['cluster_name']
- if resource_type == 'cluster':
+ resource_type = module.params["resource_type"]
+ cluster_name = module.params["cluster_name"]
+ if resource_type == "cluster":
resource = cluster_name
else:
- resource = module.params['resource']
- tags = module.params['tags']
- state = module.params['state']
- purge_tags = module.params['purge_tags']
+ resource = module.params["resource"]
+ tags = module.params["tags"]
+ state = module.params["state"]
+ purge_tags = module.params["purge_tags"]
- result = {'changed': False}
+ result = {"changed": False}
- ecs = module.client('ecs')
+ ecs = module.client("ecs")
resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource)
@@ -187,7 +185,7 @@ def main():
add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
remove_tags = {}
- if state == 'absent':
+ if state == "absent":
for key in tags:
if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
remove_tags[key] = current_tags[key]
@@ -196,28 +194,28 @@ def main():
remove_tags[key] = current_tags[key]
if remove_tags:
- result['changed'] = True
- result['removed_tags'] = remove_tags
+ result["changed"] = True
+ result["removed_tags"] = remove_tags
if not module.check_mode:
try:
ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys()))
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
+ module.fail_json_aws(e, msg=f"Failed to remove tags {remove_tags} from resource {resource}")
- if state == 'present' and add_tags:
- result['changed'] = True
- result['added_tags'] = add_tags
+ if state == "present" and add_tags:
+ result["changed"] = True
+ result["added_tags"] = add_tags
current_tags.update(add_tags)
if not module.check_mode:
try:
- tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value')
+ tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name="key", tag_value_key_name="value")
ecs.tag_resource(resourceArn=resource_arn, tags=tags)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
+ module.fail_json_aws(e, msg=f"Failed to set tags {add_tags} on resource {resource}")
- result['tags'] = get_tags(ecs, module, resource_arn)
+ result["tags"] = get_tags(ecs, module, resource_arn)
module.exit_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
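For readers skimming the tagging flow above: compare_aws_tags() yields the tags to add and the keys to remove, honouring purge_tags. A rough pure-Python illustration of that contract (an approximation, not the amazon.aws implementation itself):

def compare_tags(current, desired, purge_tags=False):
    """Return (to_add, to_remove_keys) for moving current towards desired."""
    to_add = {k: v for k, v in desired.items() if current.get(k) != v}
    to_remove = [k for k in current if k not in desired] if purge_tags else []
    return to_add, to_remove

print(compare_tags({"env": "dev", "owner": "team-a"}, {"env": "prod"}, purge_tags=True))
# -> ({'env': 'prod'}, ['owner'])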
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_task.py b/ansible_collections/community/aws/plugins/modules/ecs_task.py
index 54948ce21..169ff4c7b 100644
--- a/ansible_collections/community/aws/plugins/modules/ecs_task.py
+++ b/ansible_collections/community/aws/plugins/modules/ecs_task.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ecs_task
version_added: 1.0.0
@@ -99,13 +97,12 @@ options:
default: false
version_added: 4.1.0
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# Simple example of run task
- name: Run task
community.aws.ecs_task:
@@ -120,65 +117,66 @@ EXAMPLES = r'''
- name: Start a task
community.aws.ecs_task:
- operation: start
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
- tags:
- resourceName: a_task_for_ansible_to_run
- type: long_running_task
- network: internal
- version: 1.4
- container_instances:
+ operation: start
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ tags:
+ resourceName: a_task_for_ansible_to_run
+ type: long_running_task
+ network: internal
+ version: 1.4
+ container_instances:
- arn:aws:ecs:us-west-2:123456789012:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
- started_by: ansible_user
- network_configuration:
- subnets:
+ started_by: ansible_user
+ network_configuration:
+ subnets:
- subnet-abcd1234
- security_groups:
+ security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: RUN a task on Fargate
community.aws.ecs_task:
- operation: run
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
- started_by: ansible_user
- launch_type: FARGATE
- network_configuration:
- subnets:
+ operation: run
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ started_by: ansible_user
+ launch_type: FARGATE
+ network_configuration:
+ subnets:
- subnet-abcd1234
- security_groups:
+ security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: RUN a task on Fargate with public ip assigned
community.aws.ecs_task:
- operation: run
- count: 2
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
- started_by: ansible_user
- launch_type: FARGATE
- network_configuration:
- assign_public_ip: true
- subnets:
+ operation: run
+ count: 2
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ started_by: ansible_user
+ launch_type: FARGATE
+ network_configuration:
+ assign_public_ip: true
+ subnets:
- subnet-abcd1234
register: task_output
- name: Stop a task
community.aws.ecs_task:
- operation: stop
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
-'''
-RETURN = r'''
+ operation: stop
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+"""
+
+RETURN = r"""
task:
description: details about the task that was started
returned: success
@@ -242,45 +240,47 @@ task:
description: The launch type on which to run your task.
returned: always
type: str
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
class EcsExecManager:
"""Handles ECS Tasks"""
def __init__(self, module):
self.module = module
- self.ecs = module.client('ecs')
- self.ec2 = module.client('ec2')
+ self.ecs = module.client("ecs")
+ self.ec2 = module.client("ec2")
def format_network_configuration(self, network_config):
result = dict()
- if 'subnets' in network_config:
- result['subnets'] = network_config['subnets']
+ if "subnets" in network_config:
+ result["subnets"] = network_config["subnets"]
else:
self.module.fail_json(msg="Network configuration must include subnets")
- if 'security_groups' in network_config:
- groups = network_config['security_groups']
- if any(not sg.startswith('sg-') for sg in groups):
+ if "security_groups" in network_config:
+ groups = network_config["security_groups"]
+ if any(not sg.startswith("sg-") for sg in groups):
try:
- vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
+ vpc_id = self.ec2.describe_subnets(SubnetIds=[result["subnets"][0]])["Subnets"][0]["VpcId"]
groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't look up security groups")
- result['securityGroups'] = groups
- if 'assign_public_ip' in network_config:
- if network_config['assign_public_ip'] is True:
- result['assignPublicIp'] = "ENABLED"
+ result["securityGroups"] = groups
+ if "assign_public_ip" in network_config:
+ if network_config["assign_public_ip"] is True:
+ result["assignPublicIp"] = "ENABLED"
else:
- result['assignPublicIp'] = "DISABLED"
+ result["assignPublicIp"] = "DISABLED"
return dict(awsvpcConfiguration=result)
@@ -288,10 +288,10 @@ class EcsExecManager:
response = self.ecs.list_tasks(
cluster=cluster_name,
family=service_name,
- desiredStatus=status
+ desiredStatus=status,
)
- if len(response['taskArns']) > 0:
- for c in response['taskArns']:
+ if len(response["taskArns"]) > 0:
+ for c in response["taskArns"]:
if c.endswith(service_name):
return c
return None
@@ -299,14 +299,17 @@ class EcsExecManager:
def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags):
if overrides is None:
overrides = dict()
- params = dict(cluster=cluster, taskDefinition=task_definition,
- overrides=overrides, count=count, startedBy=startedBy)
- if self.module.params['network_configuration']:
- params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
+ params = dict(
+ cluster=cluster, taskDefinition=task_definition, overrides=overrides, count=count, startedBy=startedBy
+ )
+ if self.module.params["network_configuration"]:
+ params["networkConfiguration"] = self.format_network_configuration(
+ self.module.params["network_configuration"]
+ )
if launch_type:
- params['launchType'] = launch_type
+ params["launchType"] = launch_type
if tags:
- params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+ params["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value")
# TODO: need to check if long arn format enabled.
try:
@@ -314,168 +317,164 @@ class EcsExecManager:
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't run task")
# include tasks and failures
- return response['tasks']
+ return response["tasks"]
def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags):
args = dict()
if cluster:
- args['cluster'] = cluster
+ args["cluster"] = cluster
if task_definition:
- args['taskDefinition'] = task_definition
+ args["taskDefinition"] = task_definition
if overrides:
- args['overrides'] = overrides
+ args["overrides"] = overrides
if container_instances:
- args['containerInstances'] = container_instances
+ args["containerInstances"] = container_instances
if startedBy:
- args['startedBy'] = startedBy
- if self.module.params['network_configuration']:
- args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
+ args["startedBy"] = startedBy
+ if self.module.params["network_configuration"]:
+ args["networkConfiguration"] = self.format_network_configuration(
+ self.module.params["network_configuration"]
+ )
if tags:
- args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+ args["tags"] = ansible_dict_to_boto3_tag_list(tags, "key", "value")
try:
response = self.ecs.start_task(**args)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't start task")
# include tasks and failures
- return response['tasks']
+ return response["tasks"]
def stop_task(self, cluster, task):
response = self.ecs.stop_task(cluster=cluster, task=task)
- return response['task']
+ return response["task"]
def ecs_task_long_format_enabled(self):
- account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True)
- return account_support['settings'][0]['value'] == 'enabled'
+ account_support = self.ecs.list_account_settings(name="taskLongArnFormat", effectiveSettings=True)
+ return account_support["settings"][0]["value"] == "enabled"
def main():
argument_spec = dict(
- operation=dict(required=True, choices=['run', 'start', 'stop']),
- cluster=dict(required=False, type='str', default='default'), # R S P
- task_definition=dict(required=False, type='str'), # R* S*
- overrides=dict(required=False, type='dict'), # R S
- count=dict(required=False, type='int'), # R
- task=dict(required=False, type='str'), # P*
- container_instances=dict(required=False, type='list', elements='str'), # S*
- started_by=dict(required=False, type='str'), # R S
- network_configuration=dict(required=False, type='dict'),
- launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
- tags=dict(required=False, type='dict', aliases=['resource_tags']),
- wait=dict(required=False, default=False, type='bool'),
+ operation=dict(required=True, choices=["run", "start", "stop"]),
+ cluster=dict(required=False, type="str", default="default"), # R S P
+ task_definition=dict(required=False, type="str"), # R* S*
+ overrides=dict(required=False, type="dict"), # R S
+ count=dict(required=False, type="int"), # R
+ task=dict(required=False, type="str"), # P*
+ container_instances=dict(required=False, type="list", elements="str"), # S*
+ started_by=dict(required=False, type="str"), # R S
+ network_configuration=dict(required=False, type="dict"),
+ launch_type=dict(required=False, choices=["EC2", "FARGATE"]),
+ tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+ wait=dict(required=False, default=False, type="bool"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
- required_if=[
- ('launch_type', 'FARGATE', ['network_configuration']),
- ('operation', 'run', ['task_definition']),
- ('operation', 'start', [
- 'task_definition',
- 'container_instances'
- ]),
- ('operation', 'stop', ['task_definition', 'task']),
- ])
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ("launch_type", "FARGATE", ["network_configuration"]),
+ ("operation", "run", ["task_definition"]),
+ ("operation", "start", ["task_definition", "container_instances"]),
+ ("operation", "stop", ["task_definition", "task"]),
+ ],
+ )
# Validate Inputs
- if module.params['operation'] == 'run':
- task_to_list = module.params['task_definition']
+ if module.params["operation"] == "run":
+ task_to_list = module.params["task_definition"]
status_type = "RUNNING"
- if module.params['operation'] == 'start':
- task_to_list = module.params['task']
+ if module.params["operation"] == "start":
+ task_to_list = module.params["task"]
status_type = "RUNNING"
- if module.params['operation'] == 'stop':
- task_to_list = module.params['task_definition']
+ if module.params["operation"] == "stop":
+ task_to_list = module.params["task_definition"]
status_type = "STOPPED"
service_mgr = EcsExecManager(module)
- if module.params['tags']:
+ if module.params["tags"]:
if not service_mgr.ecs_task_long_format_enabled():
module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags")
- existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
+ existing = service_mgr.list_tasks(module.params["cluster"], task_to_list, status_type)
results = dict(changed=False)
- if module.params['operation'] == 'run':
+ if module.params["operation"] == "run":
if existing:
# TBD - validate the rest of the details
- results['task'] = existing
+ results["task"] = existing
else:
if not module.check_mode:
-
# run_task returns a list of tasks created
tasks = service_mgr.run_task(
- module.params['cluster'],
- module.params['task_definition'],
- module.params['overrides'],
- module.params['count'],
- module.params['started_by'],
- module.params['launch_type'],
- module.params['tags'],
+ module.params["cluster"],
+ module.params["task_definition"],
+ module.params["overrides"],
+ module.params["count"],
+ module.params["started_by"],
+ module.params["launch_type"],
+ module.params["tags"],
)
# Wait for task(s) to be running prior to exiting
- if module.params['wait']:
-
- waiter = service_mgr.ecs.get_waiter('tasks_running')
+ if module.params["wait"]:
+ waiter = service_mgr.ecs.get_waiter("tasks_running")
try:
waiter.wait(
- tasks=[task['taskArn'] for task in tasks],
- cluster=module.params['cluster'],
+ tasks=[task["taskArn"] for task in tasks],
+ cluster=module.params["cluster"],
)
except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, 'Timeout waiting for tasks to run')
+ module.fail_json_aws(e, "Timeout waiting for tasks to run")
- results['task'] = tasks
+ results["task"] = tasks
- results['changed'] = True
+ results["changed"] = True
- elif module.params['operation'] == 'start':
+ elif module.params["operation"] == "start":
if existing:
# TBD - validate the rest of the details
- results['task'] = existing
+ results["task"] = existing
else:
if not module.check_mode:
- results['task'] = service_mgr.start_task(
- module.params['cluster'],
- module.params['task_definition'],
- module.params['overrides'],
- module.params['container_instances'],
- module.params['started_by'],
- module.params['tags'],
+ results["task"] = service_mgr.start_task(
+ module.params["cluster"],
+ module.params["task_definition"],
+ module.params["overrides"],
+ module.params["container_instances"],
+ module.params["started_by"],
+ module.params["tags"],
)
- results['changed'] = True
+ results["changed"] = True
- elif module.params['operation'] == 'stop':
+ elif module.params["operation"] == "stop":
if existing:
- results['task'] = existing
+ results["task"] = existing
else:
if not module.check_mode:
# the task exists, so stop it and mark changed.
# return info about the task stopped
- results['task'] = service_mgr.stop_task(
- module.params['cluster'],
- module.params['task']
- )
+ results["task"] = service_mgr.stop_task(module.params["cluster"], module.params["task"])
# Wait for task to be stopped prior to exiting
- if module.params['wait']:
-
- waiter = service_mgr.ecs.get_waiter('tasks_stopped')
+ if module.params["wait"]:
+ waiter = service_mgr.ecs.get_waiter("tasks_stopped")
try:
waiter.wait(
- tasks=[module.params['task']],
- cluster=module.params['cluster'],
+ tasks=[module.params["task"]],
+ cluster=module.params["cluster"],
)
except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, 'Timeout waiting for task to stop')
+ module.fail_json_aws(e, "Timeout waiting for task to stop")
- results['changed'] = True
+ results["changed"] = True
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
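The format_network_configuration() method above maps the module's snake_case network options onto the camelCase awsvpcConfiguration structure that run_task/start_task expect. A sketch of that mapping for a typical Fargate input (security-group name resolution omitted; the IDs are assumed already resolved):

ansible_input = {
    "subnets": ["subnet-abcd1234"],
    "security_groups": ["sg-aaaa1111"],
    "assign_public_ip": True,
}

boto3_param = {
    "awsvpcConfiguration": {
        "subnets": ansible_input["subnets"],
        "securityGroups": ansible_input["security_groups"],
        "assignPublicIp": "ENABLED" if ansible_input["assign_public_ip"] else "DISABLED",
    }
}
print(boto3_param)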
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py
index a8b5e97d8..25a786e4f 100644
--- a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py
+++ b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# -*- coding: utf-8 -*-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: ecs_taskdefinition
version_added: 1.0.0
@@ -629,50 +627,72 @@ options:
expression:
description: A cluster query language expression to apply to the constraint.
type: str
+ runtime_platform:
+ version_added: 6.4.0
+ description:
+ - Runtime platform configuration for the task.
+ required: false
+ type: dict
+ default: {
+ "operatingSystemFamily": "LINUX",
+ "cpuArchitecture": "X86_64"
+ }
+ suboptions:
+ cpuArchitecture:
+ description: The CPU architecture to be used by the task.
+ type: str
+ required: false
+ choices: ['X86_64', 'ARM64']
+ operatingSystemFamily:
+ description: The operating system family to be used by the task.
+ type: str
+ required: false
+ choices: ['LINUX', 'WINDOWS_SERVER_2019_FULL', 'WINDOWS_SERVER_2019_CORE', 'WINDOWS_SERVER_2022_FULL', 'WINDOWS_SERVER_2022_CORE']
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create task definition
community.aws.ecs_taskdefinition:
containers:
- - name: simple-app
- cpu: 10
- essential: true
- image: "httpd:2.4"
- memory: 300
- mountPoints:
- - containerPath: /usr/local/apache2/htdocs
- sourceVolume: my-vol
- portMappings:
- - containerPort: 80
- hostPort: 80
- logConfiguration:
- logDriver: awslogs
- options:
- awslogs-group: /ecs/test-cluster-taskdef
- awslogs-region: us-west-2
- awslogs-stream-prefix: ecs
- - name: busybox
- command:
- - >
- /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!
- </h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
- cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
- cpu: 10
- entryPoint:
- - sh
- - "-c"
- essential: false
- image: busybox
- memory: 200
- volumesFrom:
- - sourceContainer: simple-app
+ - name: simple-app
+ cpu: 10
+ essential: true
+ image: "httpd:2.4"
+ memory: 300
+ mountPoints:
+ - containerPath: /usr/local/apache2/htdocs
+ sourceVolume: my-vol
+ portMappings:
+ - containerPort: 80
+ hostPort: 80
+ logConfiguration:
+ logDriver: awslogs
+ options:
+ awslogs-group: /ecs/test-cluster-taskdef
+ awslogs-region: us-west-2
+ awslogs-stream-prefix: ecs
+ - name: busybox
+ command:
+ - >
+ /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1>
+ <h2>Congratulations!</h2>
+ <p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
+ cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
+ cpu: 10
+ entryPoint:
+ - sh
+ - "-c"
+ essential: false
+ image: busybox
+ memory: 200
+ volumesFrom:
+ - sourceContainer: simple-app
volumes:
- - name: my-vol
+ - name: my-vol
family: test-cluster-taskdef
state: present
register: task_output
@@ -681,26 +701,26 @@ EXAMPLES = r'''
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
- cpu: 512
- memory: 1024
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
state: present
- name: Create task definition
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
launch_type: FARGATE
cpu: 512
memory: 1024
@@ -711,36 +731,36 @@ EXAMPLES = r'''
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
- cpu: 512
- memory: 1024
- dependsOn:
- - containerName: "simple-app"
- condition: "start"
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
+ dependsOn:
+ - containerName: "simple-app"
+ condition: "start"
# Create Task Definition with Environment Variables and Secrets
- name: Create task definition
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- environment:
- - name: "PORT"
- value: "8080"
- secrets:
- # For variables stored in Secrets Manager
- - name: "NGINX_HOST"
- valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST"
- # For variables stored in Parameter Store
- - name: "API_KEY"
- valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY"
+ - name: nginx
+ essential: true
+ image: "nginx"
+ environment:
+ - name: "PORT"
+ value: "8080"
+ secrets:
+ # For variables stored in Secrets Manager
+ - name: "NGINX_HOST"
+ valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST"
+ # For variables stored in Parameter Store
+ - name: "API_KEY"
+ valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY"
launch_type: FARGATE
cpu: 512
memory: 1GB
@@ -752,39 +772,40 @@ EXAMPLES = r'''
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
- cpu: 512
- memory: 1024
- healthCheck:
- command:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
+ healthCheck:
+ command:
- CMD-SHELL
- /app/healthcheck.py
- interval: 60
- retries: 3
- startPeriod: 15
- timeout: 15
+ interval: 60
+ retries: 3
+ startPeriod: 15
+ timeout: 15
state: present
-'''
-RETURN = r'''
+"""
+
+RETURN = r"""
taskdefinition:
description: a reflection of the input parameters
type: dict
returned: always
-'''
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class EcsTaskManager:
@@ -793,49 +814,65 @@ class EcsTaskManager:
def __init__(self, module):
self.module = module
- self.ecs = module.client('ecs', AWSRetry.jittered_backoff())
+ self.ecs = module.client("ecs", AWSRetry.jittered_backoff())
def describe_task(self, task_name):
try:
response = self.ecs.describe_task_definition(aws_retry=True, taskDefinition=task_name)
- return response['taskDefinition']
+ return response["taskDefinition"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
return None
- def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions,
- volumes, launch_type, cpu, memory, placement_constraints):
+ def register_task(
+ self,
+ family,
+ task_role_arn,
+ execution_role_arn,
+ network_mode,
+ container_definitions,
+ volumes,
+ launch_type,
+ cpu,
+ memory,
+ placement_constraints,
+ runtime_platform,
+ ):
validated_containers = []
# Ensures the number parameters are int as required by the AWS SDK
for container in container_definitions:
- for param in ('memory', 'cpu', 'memoryReservation', 'startTimeout', 'stopTimeout'):
+ for param in ("memory", "cpu", "memoryReservation", "startTimeout", "stopTimeout"):
if param in container:
container[param] = int(container[param])
- if 'portMappings' in container:
- for port_mapping in container['portMappings']:
- for port in ('hostPort', 'containerPort'):
+ if "portMappings" in container:
+ for port_mapping in container["portMappings"]:
+ for port in ("hostPort", "containerPort"):
if port in port_mapping:
port_mapping[port] = int(port_mapping[port])
- if network_mode == 'awsvpc' and 'hostPort' in port_mapping:
- if port_mapping['hostPort'] != port_mapping.get('containerPort'):
- self.module.fail_json(msg="In awsvpc network mode, host port must be set to the same as "
- "container port or not be set")
-
- if 'linuxParameters' in container:
- for linux_param in container.get('linuxParameters'):
- if linux_param == 'tmpfs':
- for tmpfs_param in container['linuxParameters']['tmpfs']:
- if 'size' in tmpfs_param:
- tmpfs_param['size'] = int(tmpfs_param['size'])
-
- for param in ('maxSwap', 'swappiness', 'sharedMemorySize'):
+ if network_mode == "awsvpc" and "hostPort" in port_mapping:
+ if port_mapping["hostPort"] != port_mapping.get("containerPort"):
+ self.module.fail_json(
+ msg=(
+ "In awsvpc network mode, host port must be set to the same as "
+ "container port or not be set"
+ )
+ )
+
+ if "linuxParameters" in container:
+ for linux_param in container.get("linuxParameters"):
+ if linux_param == "tmpfs":
+ for tmpfs_param in container["linuxParameters"]["tmpfs"]:
+ if "size" in tmpfs_param:
+ tmpfs_param["size"] = int(tmpfs_param["size"])
+
+ for param in ("maxSwap", "swappiness", "sharedMemorySize"):
if param in linux_param:
- container['linuxParameters'][param] = int(container['linuxParameters'][param])
+ container["linuxParameters"][param] = int(container["linuxParameters"][param])
- if 'ulimits' in container:
- for limits_mapping in container['ulimits']:
- for limit in ('softLimit', 'hardLimit'):
+ if "ulimits" in container:
+ for limits_mapping in container["ulimits"]:
+ for limit in ("softLimit", "hardLimit"):
if limit in limits_mapping:
limits_mapping[limit] = int(limits_mapping[limit])
@@ -845,47 +882,44 @@ class EcsTaskManager:
family=family,
taskRoleArn=task_role_arn,
containerDefinitions=container_definitions,
- volumes=volumes
+ volumes=volumes,
)
- if network_mode != 'default':
- params['networkMode'] = network_mode
+ if network_mode != "default":
+ params["networkMode"] = network_mode
if cpu:
- params['cpu'] = cpu
+ params["cpu"] = cpu
if memory:
- params['memory'] = memory
+ params["memory"] = memory
if launch_type:
- params['requiresCompatibilities'] = [launch_type]
+ params["requiresCompatibilities"] = [launch_type]
if execution_role_arn:
- params['executionRoleArn'] = execution_role_arn
+ params["executionRoleArn"] = execution_role_arn
if placement_constraints:
- params['placementConstraints'] = placement_constraints
+ params["placementConstraints"] = placement_constraints
+ if runtime_platform:
+ params["runtimePlatform"] = runtime_platform
try:
response = self.ecs.register_task_definition(aws_retry=True, **params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Failed to register task")
- return response['taskDefinition']
+ return response["taskDefinition"]
def describe_task_definitions(self, family):
- data = {
- "taskDefinitionArns": [],
- "nextToken": None
- }
+ data = {"taskDefinitionArns": [], "nextToken": None}
def fetch():
# Boto3 is weird about params passed, so only pass nextToken if we have a value
- params = {
- 'familyPrefix': family
- }
+ params = {"familyPrefix": family}
- if data['nextToken']:
- params['nextToken'] = data['nextToken']
+ if data["nextToken"]:
+ params["nextToken"] = data["nextToken"]
result = self.ecs.list_task_definitions(**params)
- data['taskDefinitionArns'] += result['taskDefinitionArns']
- data['nextToken'] = result.get('nextToken', None)
- return data['nextToken'] is not None
+ data["taskDefinitionArns"] += result["taskDefinitionArns"]
+ data["nextToken"] = result.get("nextToken", None)
+ return data["nextToken"] is not None
# Fetch all the arns, possibly across multiple pages
while fetch():
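# The fetch() closure above implements plain nextToken paging. The same loop,
# written standalone (client is assumed to be a boto3 ECS client):
def list_all_task_definition_arns(client, family):
    arns, token = [], None
    while True:
        params = {"familyPrefix": family}
        if token:  # boto3 rejects a None nextToken, hence the guard
            params["nextToken"] = token
        page = client.list_task_definitions(**params)
        arns += page["taskDefinitionArns"]
        token = page.get("nextToken")
        if not token:
            return arns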
@@ -894,118 +928,154 @@ class EcsTaskManager:
# Return the full descriptions of the task definitions, sorted ascending by revision
return list(
sorted(
- [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
- key=lambda td: td['revision']
+ [
+ self.ecs.describe_task_definition(taskDefinition=arn)["taskDefinition"]
+ for arn in data["taskDefinitionArns"]
+ ],
+ key=lambda td: td["revision"],
)
)
def deregister_task(self, taskArn):
response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
- return response['taskDefinition']
+ return response["taskDefinition"]
def main():
argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent']),
- arn=dict(required=False, type='str'),
- family=dict(required=False, type='str'),
- revision=dict(required=False, type='int'),
- force_create=dict(required=False, default=False, type='bool'),
- containers=dict(required=True, type='list', elements='dict'),
- network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'),
- task_role_arn=dict(required=False, default='', type='str'),
- execution_role_arn=dict(required=False, default='', type='str'),
- volumes=dict(required=False, type='list', elements='dict'),
- launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
+ state=dict(required=True, choices=["present", "absent"]),
+ arn=dict(required=False, type="str"),
+ family=dict(required=False, type="str"),
+ revision=dict(required=False, type="int"),
+ force_create=dict(required=False, default=False, type="bool"),
+ containers=dict(required=True, type="list", elements="dict"),
+ network_mode=dict(
+ required=False, default="bridge", choices=["default", "bridge", "host", "none", "awsvpc"], type="str"
+ ),
+ task_role_arn=dict(required=False, default="", type="str"),
+ execution_role_arn=dict(required=False, default="", type="str"),
+ volumes=dict(required=False, type="list", elements="dict"),
+ launch_type=dict(required=False, choices=["EC2", "FARGATE"]),
cpu=dict(),
- memory=dict(required=False, type='str'),
- placement_constraints=dict(required=False, type='list', elements='dict',
- options=dict(type=dict(type='str'), expression=dict(type='str'))),
+ memory=dict(required=False, type="str"),
+ placement_constraints=dict(
+ required=False,
+ type="list",
+ elements="dict",
+ options=dict(type=dict(type="str"), expression=dict(type="str")),
+ ),
+ runtime_platform=dict(
+ required=False,
+ default={"operatingSystemFamily": "LINUX", "cpuArchitecture": "X86_64"},
+ type="dict",
+ options=dict(
+ cpuArchitecture=dict(required=False, choices=["X86_64", "ARM64"]),
+ operatingSystemFamily=dict(
+ required=False,
+ choices=[
+ "LINUX",
+ "WINDOWS_SERVER_2019_FULL",
+ "WINDOWS_SERVER_2019_CORE",
+ "WINDOWS_SERVER_2022_FULL",
+ "WINDOWS_SERVER_2022_CORE",
+ ],
+ ),
+ ),
+ ),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])]
- )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[("launch_type", "FARGATE", ["cpu", "memory"])],
+ )
task_to_describe = None
task_mgr = EcsTaskManager(module)
results = dict(changed=False)
- if module.params['state'] == 'present':
- if 'containers' not in module.params or not module.params['containers']:
+ if module.params["state"] == "present":
+ if "containers" not in module.params or not module.params["containers"]:
module.fail_json(msg="To use task definitions, a list of containers must be specified")
- if 'family' not in module.params or not module.params['family']:
+ if "family" not in module.params or not module.params["family"]:
module.fail_json(msg="To use task definitions, a family must be specified")
- network_mode = module.params['network_mode']
- launch_type = module.params['launch_type']
- placement_constraints = module.params['placement_constraints']
- if launch_type == 'FARGATE':
- if network_mode != 'awsvpc':
+ network_mode = module.params["network_mode"]
+ launch_type = module.params["launch_type"]
+ placement_constraints = module.params["placement_constraints"]
+ if launch_type == "FARGATE":
+ if network_mode != "awsvpc":
module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc")
if placement_constraints:
module.fail_json(msg="Task placement constraints are not supported for tasks run on Fargate")
- for container in module.params['containers']:
- if container.get('links') and network_mode == 'awsvpc':
- module.fail_json(msg='links parameter is not supported if network mode is awsvpc.')
+ for container in module.params["containers"]:
+ if container.get("links") and network_mode == "awsvpc":
+ module.fail_json(msg="links parameter is not supported if network mode is awsvpc.")
- for environment in container.get('environment', []):
- environment['value'] = environment['value']
+ for environment in container.get("environment", []):
+ environment["value"] = environment["value"]
- for environment_file in container.get('environmentFiles', []):
- if environment_file['type'] != 's3':
- module.fail_json(msg='The only supported value for environmentFiles is s3.')
+ for environment_file in container.get("environmentFiles", []):
+ if environment_file["type"] != "s3":
+ module.fail_json(msg="The only supported value for environmentFiles is s3.")
- for linux_param in container.get('linuxParameters', {}):
- if linux_param == 'devices' and launch_type == 'FARGATE':
- module.fail_json(msg='devices parameter is not supported with the FARGATE launch type.')
+ for linux_param in container.get("linuxParameters", {}):
+ if linux_param == "maxSwap" and launch_type == "FARGATE":
+ module.fail_json(msg="devices parameter is not supported with the FARGATE launch type.")
- if linux_param == 'maxSwap' and launch_type == 'FARGATE':
- module.fail_json(msg='maxSwap parameter is not supported with the FARGATE launch type.')
- elif linux_param == 'maxSwap' and int(container['linuxParameters']['maxSwap']) < 0:
- module.fail_json(msg='Accepted values for maxSwap are 0 or any positive integer.')
+ if linux_param == "maxSwap" and launch_type == "FARGATE":
+ module.fail_json(msg="maxSwap parameter is not supported with the FARGATE launch type.")
+ elif linux_param == "maxSwap" and int(container["linuxParameters"]["maxSwap"]) < 0:
+ module.fail_json(msg="Accepted values for maxSwap are 0 or any positive integer.")
- if (
- linux_param == 'swappiness' and
- (int(container['linuxParameters']['swappiness']) < 0 or int(container['linuxParameters']['swappiness']) > 100)
+ if linux_param == "swappiness" and (
+ int(container["linuxParameters"]["swappiness"]) < 0
+ or int(container["linuxParameters"]["swappiness"]) > 100
):
- module.fail_json(msg='Accepted values for swappiness are whole numbers between 0 and 100.')
+ module.fail_json(msg="Accepted values for swappiness are whole numbers between 0 and 100.")
- if linux_param == 'sharedMemorySize' and launch_type == 'FARGATE':
- module.fail_json(msg='sharedMemorySize parameter is not supported with the FARGATE launch type.')
+ if linux_param == "sharedMemorySize" and launch_type == "FARGATE":
+ module.fail_json(msg="sharedMemorySize parameter is not supported with the FARGATE launch type.")
- if linux_param == 'tmpfs' and launch_type == 'FARGATE':
- module.fail_json(msg='tmpfs parameter is not supported with the FARGATE launch type.')
+ if linux_param == "tmpfs" and launch_type == "FARGATE":
+ module.fail_json(msg="tmpfs parameter is not supported with the FARGATE launch type.")
- if container.get('hostname') and network_mode == 'awsvpc':
- module.fail_json(msg='hostname parameter is not supported when the awsvpc network mode is used.')
+ if container.get("hostname") and network_mode == "awsvpc":
+ module.fail_json(msg="hostname parameter is not supported when the awsvpc network mode is used.")
- if container.get('extraHosts') and network_mode == 'awsvpc':
- module.fail_json(msg='extraHosts parameter is not supported when the awsvpc network mode is used.')
+ if container.get("extraHosts") and network_mode == "awsvpc":
+ module.fail_json(msg="extraHosts parameter is not supported when the awsvpc network mode is used.")
- family = module.params['family']
- existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
+ family = module.params["family"]
+ existing_definitions_in_family = task_mgr.describe_task_definitions(module.params["family"])
- if 'revision' in module.params and module.params['revision']:
+ if "revision" in module.params and module.params["revision"]:
# The definition specifies revision. We must guarantee that an active revision of that number will result from this.
- revision = int(module.params['revision'])
+ revision = int(module.params["revision"])
# A revision has been explicitly specified. Attempt to locate a matching revision
- tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
+ tasks_defs_for_revision = [td for td in existing_definitions_in_family if td["revision"] == revision]
existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
- if existing and existing['status'] != "ACTIVE":
+ if existing and existing["status"] != "ACTIVE":
# We cannot reactivate an inactive revision
- module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision))
+ module.fail_json(
+ msg=f"A task in family '{family}' already exists for revision {int(revision)}, but it is inactive"
+ )
elif not existing:
if not existing_definitions_in_family and revision != 1:
- module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
- elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
- module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
- (revision, existing_definitions_in_family[-1]['revision'] + 1))
+ module.fail_json(
+ msg=f"You have specified a revision of {int(revision)} but a created revision would be 1"
+ )
+ elif existing_definitions_in_family and existing_definitions_in_family[-1]["revision"] + 1 != revision:
+ module.fail_json(
+ msg=(
+ f"You have specified a revision of {int(revision)} but a created revision would be"
+ f" {int(existing_definitions_in_family[-1]['revision'] + 1)}"
+ )
+ )
else:
existing = None
@@ -1025,9 +1095,9 @@ def main():
if list_val not in right_list:
# if list_val is the port mapping, the key 'protocol' may be absent (but defaults to 'tcp')
# fill in that default if absent and see if it is in right_list then
- if isinstance(list_val, dict) and not list_val.get('protocol'):
+ if isinstance(list_val, dict) and not list_val.get("protocol"):
modified_list_val = dict(list_val)
- modified_list_val.update(protocol='tcp')
+ modified_list_val.update(protocol="tcp")
if modified_list_val in right_list:
continue
else:
@@ -1037,24 +1107,32 @@ def main():
for k, v in right.items():
if v and k not in left:
# 'essential' defaults to True when not specified
- if k == 'essential' and v is True:
+ if k == "essential" and v is True:
pass
else:
return False
return True
- def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, existing_task_definition):
- if td['status'] != "ACTIVE":
+ def _task_definition_matches(
+ requested_volumes,
+ requested_containers,
+ requested_task_role_arn,
+ requested_launch_type,
+ existing_task_definition,
+ ):
+ if td["status"] != "ACTIVE":
return None
- if requested_task_role_arn != td.get('taskRoleArn', ""):
+ if requested_task_role_arn != td.get("taskRoleArn", ""):
return None
- if requested_launch_type is not None and requested_launch_type not in td.get('requiresCompatibilities', []):
+ if requested_launch_type is not None and requested_launch_type not in td.get(
+ "requiresCompatibilities", []
+ ):
return None
- existing_volumes = td.get('volumes', []) or []
+ existing_volumes = td.get("volumes", []) or []
if len(requested_volumes) != len(existing_volumes):
# Nope.
@@ -1072,7 +1150,7 @@ def main():
if not found:
return None
- existing_containers = td.get('containerDefinitions', []) or []
+ existing_containers = td.get("containerDefinitions", []) or []
if len(requested_containers) != len(existing_containers):
# Nope.
@@ -1093,42 +1171,51 @@ def main():
# No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
for td in existing_definitions_in_family:
- requested_volumes = module.params['volumes'] or []
- requested_containers = module.params['containers'] or []
- requested_task_role_arn = module.params['task_role_arn']
- requested_launch_type = module.params['launch_type']
- existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td)
+ requested_volumes = module.params["volumes"] or []
+ requested_containers = module.params["containers"] or []
+ requested_task_role_arn = module.params["task_role_arn"]
+ requested_launch_type = module.params["launch_type"]
+ existing = _task_definition_matches(
+ requested_volumes, requested_containers, requested_task_role_arn, requested_launch_type, td
+ )
if existing:
break
- if existing and not module.params.get('force_create'):
+ if existing and not module.params.get("force_create"):
# Awesome. Have an existing one. Nothing to do.
- results['taskdefinition'] = existing
+ results["taskdefinition"] = existing
else:
if not module.check_mode:
# Doesn't exist. create it.
- volumes = module.params.get('volumes', []) or []
- results['taskdefinition'] = task_mgr.register_task(module.params['family'],
- module.params['task_role_arn'],
- module.params['execution_role_arn'],
- module.params['network_mode'],
- module.params['containers'],
- volumes,
- module.params['launch_type'],
- module.params['cpu'],
- module.params['memory'],
- module.params['placement_constraints'],)
- results['changed'] = True
-
- elif module.params['state'] == 'absent':
+ volumes = module.params.get("volumes", []) or []
+ results["taskdefinition"] = task_mgr.register_task(
+ module.params["family"],
+ module.params["task_role_arn"],
+ module.params["execution_role_arn"],
+ module.params["network_mode"],
+ module.params["containers"],
+ volumes,
+ module.params["launch_type"],
+ module.params["cpu"],
+ module.params["memory"],
+ module.params["placement_constraints"],
+ module.params["runtime_platform"],
+ )
+ results["changed"] = True
+
+ elif module.params["state"] == "absent":
# When de-registering a task definition, we can specify the ARN OR the family and revision.
- if module.params['state'] == 'absent':
- if 'arn' in module.params and module.params['arn'] is not None:
- task_to_describe = module.params['arn']
- elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
- module.params['revision'] is not None:
- task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
+ if module.params["state"] == "absent":
+ if "arn" in module.params and module.params["arn"] is not None:
+ task_to_describe = module.params["arn"]
+ elif (
+ "family" in module.params
+ and module.params["family"] is not None
+ and "revision" in module.params
+ and module.params["revision"] is not None
+ ):
+ task_to_describe = module.params["family"] + ":" + str(module.params["revision"])
else:
module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
@@ -1138,16 +1225,16 @@ def main():
pass
else:
# It exists, so we should delete it and mark changed. Return info about the task definition deleted
- results['taskdefinition'] = existing
- if 'status' in existing and existing['status'] == "INACTIVE":
- results['changed'] = False
+ results["taskdefinition"] = existing
+ if "status" in existing and existing["status"] == "INACTIVE":
+ results["changed"] = False
else:
if not module.check_mode:
task_mgr.deregister_task(task_to_describe)
- results['changed'] = True
+ results["changed"] = True
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
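
Beyond the quote-style reformat, the functional change in this module is the new runtime_platform option, threaded through register_task(). A hedged boto3 sketch of the underlying API call it enables (family, image and values below are placeholders; credentials and region are assumed to come from the environment):

```python
import boto3

ecs = boto3.client("ecs")
response = ecs.register_task_definition(
    family="example-arm-task",  # placeholder family name
    networkMode="awsvpc",
    requiresCompatibilities=["FARGATE"],
    cpu="256",
    memory="512",
    runtimePlatform={  # same shape as the module's runtime_platform default
        "cpuArchitecture": "ARM64",
        "operatingSystemFamily": "LINUX",
    },
    containerDefinitions=[
        {"name": "app", "image": "public.ecr.aws/nginx/nginx:latest", "essential": True},
    ],
)
print(response["taskDefinition"]["taskDefinitionArn"])
```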
diff --git a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py
index 6fbc41731..5e235096d 100644
--- a/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py
+++ b/ansible_collections/community/aws/plugins/modules/ecs_taskdefinition_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ecs_taskdefinition_info
version_added: 1.0.0
@@ -27,20 +25,19 @@ options:
required: true
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- community.aws.ecs_taskdefinition_info:
task_definition: test-td
-'''
+"""
-RETURN = '''
+RETURN = r"""
container_definitions:
description: Returns a list of complex objects representing the containers
returned: success
@@ -348,33 +345,34 @@ placement_constraints:
description: A cluster query language expression to apply to the constraint.
returned: when present
type: str
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def main():
argument_spec = dict(
- task_definition=dict(required=True, type='str')
+ task_definition=dict(required=True, type="str"),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- ecs = module.client('ecs')
+ ecs = module.client("ecs")
try:
- ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition']
+ ecs_td = ecs.describe_task_definition(taskDefinition=module.params["task_definition"])["taskDefinition"]
except botocore.exceptions.ClientError:
ecs_td = {}
module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
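
ecs_taskdefinition_info boils down to a single describe call plus a CamelCase-to-snake_case conversion. Roughly the equivalent outside Ansible, assuming ansible-core is installed for the dict transformation helper:

```python
import boto3
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

ecs = boto3.client("ecs")
# Accepts a family, family:revision, or full ARN, just like the module's
# task_definition parameter ("test-td" is a placeholder).
td = ecs.describe_task_definition(taskDefinition="test-td")["taskDefinition"]
print(camel_dict_to_snake_dict(td)["container_definitions"])
```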
diff --git a/ansible_collections/community/aws/plugins/modules/efs.py b/ansible_collections/community/aws/plugins/modules/efs.py
index de1d563fb..6b9390f2b 100644
--- a/ansible_collections/community/aws/plugins/modules/efs.py
+++ b/ansible_collections/community/aws/plugins/modules/efs.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: efs
version_added: 1.0.0
@@ -102,34 +100,33 @@ options:
version_added: 2.1.0
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
- amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: EFS provisioning
community.aws.efs:
state: present
name: myTestEFS
tags:
- Name: myTestNameTag
- purpose: file-storage
+ Name: myTestNameTag
+ purpose: file-storage
targets:
- - subnet_id: subnet-748c5d03
- security_groups: [ "sg-1a2b3c4d" ]
+ - subnet_id: subnet-748c5d03
+ security_groups: ["sg-1a2b3c4d"]
- name: Modifying EFS data
community.aws.efs:
state: present
name: myTestEFS
tags:
- name: myAnotherTestTag
+ name: myAnotherTestTag
targets:
- - subnet_id: subnet-7654fdca
- security_groups: [ "sg-4c5d6f7a" ]
+ - subnet_id: subnet-7654fdca
+ security_groups: ["sg-4c5d6f7a"]
- name: Set a lifecycle policy
community.aws.efs:
@@ -137,8 +134,8 @@ EXAMPLES = r'''
name: myTestEFS
transition_to_ia: 7
targets:
- - subnet_id: subnet-7654fdca
- security_groups: [ "sg-4c5d6f7a" ]
+ - subnet_id: subnet-7654fdca
+ security_groups: ["sg-4c5d6f7a"]
- name: Remove a lifecycle policy
community.aws.efs:
@@ -146,16 +143,16 @@ EXAMPLES = r'''
name: myTestEFS
transition_to_ia: None
targets:
- - subnet_id: subnet-7654fdca
- security_groups: [ "sg-4c5d6f7a" ]
+ - subnet_id: subnet-7654fdca
+ security_groups: ["sg-4c5d6f7a"]
- name: Deleting EFS
community.aws.efs:
state: absent
name: myTestEFS
-'''
+"""
-RETURN = r'''
+RETURN = r"""
creation_time:
description: timestamp of creation date
returned: always
@@ -244,8 +241,7 @@ tags:
"name": "my-efs",
"key": "Value"
}
-
-'''
+"""
from time import sleep
from time import time as timestamp
@@ -257,11 +253,12 @@ except ImportError as e:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def _index_by_key(key, items):
@@ -269,35 +266,34 @@ def _index_by_key(key, items):
class EFSConnection(object):
-
DEFAULT_WAIT_TIMEOUT_SECONDS = 0
- STATE_CREATING = 'creating'
- STATE_AVAILABLE = 'available'
- STATE_DELETING = 'deleting'
- STATE_DELETED = 'deleted'
+ STATE_CREATING = "creating"
+ STATE_AVAILABLE = "available"
+ STATE_DELETING = "deleting"
+ STATE_DELETED = "deleted"
def __init__(self, module):
- self.connection = module.client('efs')
+ self.connection = module.client("efs")
region = module.region
self.module = module
self.region = region
- self.wait = module.params.get('wait')
- self.wait_timeout = module.params.get('wait_timeout')
+ self.wait = module.params.get("wait")
+ self.wait_timeout = module.params.get("wait_timeout")
def get_file_systems(self, **kwargs):
"""
- Returns generator of file systems including all attributes of FS
+ Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
- 'FileSystems',
+ "FileSystems",
self.connection.describe_file_systems,
- **kwargs
+ **kwargs,
)
for item in items:
- item['Name'] = item['CreationToken']
- item['CreationTime'] = str(item['CreationTime'])
+ item["Name"] = item["CreationToken"]
+ item["CreationTime"] = str(item["CreationTime"])
"""
In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it
AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose
@@ -305,90 +301,92 @@ class EFSConnection(object):
AWS documentation is available here:
https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
- item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
- item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
- if 'Timestamp' in item['SizeInBytes']:
- item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
- if item['LifeCycleState'] == self.STATE_AVAILABLE:
- item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
- item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
+ item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/"
+ item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/"
+ if "Timestamp" in item["SizeInBytes"]:
+ item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"])
+ if item["LifeCycleState"] == self.STATE_AVAILABLE:
+ item["Tags"] = self.get_tags(FileSystemId=item["FileSystemId"])
+ item["MountTargets"] = list(self.get_mount_targets(FileSystemId=item["FileSystemId"]))
else:
- item['Tags'] = {}
- item['MountTargets'] = []
+ item["Tags"] = {}
+ item["MountTargets"] = []
yield item
def get_tags(self, **kwargs):
"""
- Returns tag list for selected instance of EFS
+ Returns tag list for selected instance of EFS
"""
- tags = self.connection.describe_tags(**kwargs)['Tags']
+ tags = self.connection.describe_tags(**kwargs)["Tags"]
return tags
def get_mount_targets(self, **kwargs):
"""
- Returns mount targets for selected instance of EFS
+ Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
- 'MountTargets',
+ "MountTargets",
self.connection.describe_mount_targets,
- **kwargs
+ **kwargs,
)
for target in targets:
- if target['LifeCycleState'] == self.STATE_AVAILABLE:
- target['SecurityGroups'] = list(self.get_security_groups(
- MountTargetId=target['MountTargetId']
- ))
+ if target["LifeCycleState"] == self.STATE_AVAILABLE:
+ target["SecurityGroups"] = list(self.get_security_groups(MountTargetId=target["MountTargetId"]))
else:
- target['SecurityGroups'] = []
+ target["SecurityGroups"] = []
yield target
def get_security_groups(self, **kwargs):
"""
- Returns security groups for selected instance of EFS
+ Returns security groups for selected instance of EFS
"""
return iterate_all(
- 'SecurityGroups',
+ "SecurityGroups",
self.connection.describe_mount_target_security_groups,
- **kwargs
+ **kwargs,
)
def get_file_system_id(self, name):
"""
- Returns ID of instance by instance name
+ Returns ID of instance by instance name
"""
- info = first_or_default(iterate_all(
- 'FileSystems',
- self.connection.describe_file_systems,
- CreationToken=name
- ))
- return info and info['FileSystemId'] or None
+ info = first_or_default(
+ iterate_all(
+ "FileSystems",
+ self.connection.describe_file_systems,
+ CreationToken=name,
+ )
+ )
+ return info and info["FileSystemId"] or None
def get_file_system_state(self, name, file_system_id=None):
"""
- Returns state of filesystem by EFS id/name
+ Returns state of filesystem by EFS id/name
"""
- info = first_or_default(iterate_all(
- 'FileSystems',
- self.connection.describe_file_systems,
- CreationToken=name,
- FileSystemId=file_system_id
- ))
- return info and info['LifeCycleState'] or self.STATE_DELETED
+ info = first_or_default(
+ iterate_all(
+ "FileSystems",
+ self.connection.describe_file_systems,
+ CreationToken=name,
+ FileSystemId=file_system_id,
+ )
+ )
+ return info and info["LifeCycleState"] or self.STATE_DELETED
def get_mount_targets_in_state(self, file_system_id, states=None):
"""
- Returns states of mount targets of selected EFS with selected state(s) (optional)
+ Returns states of mount targets of selected EFS with selected state(s) (optional)
"""
targets = iterate_all(
- 'MountTargets',
+ "MountTargets",
self.connection.describe_mount_targets,
- FileSystemId=file_system_id
+ FileSystemId=file_system_id,
)
if states:
if not isinstance(states, list):
states = [states]
- targets = filter(lambda target: target['LifeCycleState'] in states, targets)
+ targets = filter(lambda target: target["LifeCycleState"] in states, targets)
return list(targets)
@@ -396,47 +394,53 @@ class EFSConnection(object):
"""
Returns throughput mode for selected EFS instance
"""
- info = first_or_default(iterate_all(
- 'FileSystems',
- self.connection.describe_file_systems,
- **kwargs
- ))
+ info = first_or_default(
+ iterate_all(
+ "FileSystems",
+ self.connection.describe_file_systems,
+ **kwargs,
+ )
+ )
- return info and info['ThroughputMode'] or None
+ return info and info["ThroughputMode"] or None
def get_provisioned_throughput_in_mibps(self, **kwargs):
"""
Returns throughput mode for selected EFS instance
"""
- info = first_or_default(iterate_all(
- 'FileSystems',
- self.connection.describe_file_systems,
- **kwargs
- ))
- return info.get('ProvisionedThroughputInMibps', None)
+ info = first_or_default(
+ iterate_all(
+ "FileSystems",
+ self.connection.describe_file_systems,
+ **kwargs,
+ )
+ )
+ return info.get("ProvisionedThroughputInMibps", None)
- def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps):
+ def create_file_system(
+ self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps
+ ):
"""
- Creates new filesystem with selected name
+ Creates new filesystem with selected name
"""
changed = False
state = self.get_file_system_state(name)
params = {}
- params['CreationToken'] = name
- params['PerformanceMode'] = performance_mode
+ params["CreationToken"] = name
+ params["PerformanceMode"] = performance_mode
if encrypt:
- params['Encrypted'] = encrypt
+ params["Encrypted"] = encrypt
if kms_key_id is not None:
- params['KmsKeyId'] = kms_key_id
+ params["KmsKeyId"] = kms_key_id
if throughput_mode:
- params['ThroughputMode'] = throughput_mode
+ params["ThroughputMode"] = throughput_mode
if provisioned_throughput_in_mibps:
- params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
+ params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps
if state in [self.STATE_DELETING, self.STATE_DELETED]:
wait_for(
lambda: self.get_file_system_state(name),
- self.STATE_DELETED
+ self.STATE_DELETED,
)
try:
self.connection.create_file_system(**params)
@@ -450,7 +454,7 @@ class EFSConnection(object):
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE,
- self.wait_timeout
+ self.wait_timeout,
)
return changed
@@ -467,14 +471,14 @@ class EFSConnection(object):
current_throughput = self.get_provisioned_throughput_in_mibps(FileSystemId=fs_id)
params = dict()
if throughput_mode and throughput_mode != current_mode:
- params['ThroughputMode'] = throughput_mode
+ params["ThroughputMode"] = throughput_mode
if provisioned_throughput_in_mibps and provisioned_throughput_in_mibps != current_throughput:
- params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
+ params["ProvisionedThroughputInMibps"] = provisioned_throughput_in_mibps
if len(params) > 0:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE,
- self.wait_timeout
+ self.wait_timeout,
)
try:
self.connection.update_file_system(FileSystemId=fs_id, **params)
@@ -492,11 +496,11 @@ class EFSConnection(object):
if state in [self.STATE_AVAILABLE, self.STATE_CREATING]:
fs_id = self.get_file_system_id(name)
current_policies = self.connection.describe_lifecycle_configuration(FileSystemId=fs_id)
- if transition_to_ia == 'None':
+ if transition_to_ia == "None":
LifecyclePolicies = []
else:
- LifecyclePolicies = [{'TransitionToIA': 'AFTER_' + transition_to_ia + '_DAYS'}]
- if current_policies.get('LifecyclePolicies') != LifecyclePolicies:
+ LifecyclePolicies = [{"TransitionToIA": "AFTER_" + transition_to_ia + "_DAYS"}]
+ if current_policies.get("LifecyclePolicies") != LifecyclePolicies:
response = self.connection.put_lifecycle_configuration(
FileSystemId=fs_id,
LifecyclePolicies=LifecyclePolicies,
@@ -506,20 +510,19 @@ class EFSConnection(object):
def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps):
"""
- Change attributes (mount targets and tags) of filesystem by name
+ Change attributes (mount targets and tags) of filesystem by name
"""
result = False
fs_id = self.get_file_system_id(name)
if tags is not None:
- tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags)
+ tags_need_modify, tags_to_delete = compare_aws_tags(
+ boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags
+ )
if tags_to_delete:
try:
- self.connection.delete_tags(
- FileSystemId=fs_id,
- TagKeys=tags_to_delete
- )
+ self.connection.delete_tags(FileSystemId=fs_id, TagKeys=tags_to_delete)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Unable to delete tags.")
@@ -528,8 +531,7 @@ class EFSConnection(object):
if tags_need_modify:
try:
self.connection.create_tags(
- FileSystemId=fs_id,
- Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)
+ FileSystemId=fs_id, Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Unable to create tags.")
@@ -540,54 +542,56 @@ class EFSConnection(object):
incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
- 0
+ 0,
)
- current_targets = _index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id))
- targets = _index_by_key('SubnetId', targets)
+ current_targets = _index_by_key("SubnetId", self.get_mount_targets(FileSystemId=fs_id))
+ targets = _index_by_key("SubnetId", targets)
- targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
- targets, True)
+ targets_to_create, intersection, targets_to_delete = dict_diff(current_targets, targets, True)
# To modify mount target it should be deleted and created again
- changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
- current_targets[sid], targets[sid])]
+ changed = [
+ sid
+ for sid in intersection
+ if not targets_equal(
+ ["SubnetId", "IpAddress", "NetworkInterfaceId"], current_targets[sid], targets[sid]
+ )
+ ]
targets_to_delete = list(targets_to_delete) + changed
targets_to_create = list(targets_to_create) + changed
if targets_to_delete:
for sid in targets_to_delete:
- self.connection.delete_mount_target(
- MountTargetId=current_targets[sid]['MountTargetId']
- )
+ self.connection.delete_mount_target(MountTargetId=current_targets[sid]["MountTargetId"])
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
- 0
+ 0,
)
result = True
if targets_to_create:
for sid in targets_to_create:
- self.connection.create_mount_target(
- FileSystemId=fs_id,
- **targets[sid]
- )
+ self.connection.create_mount_target(FileSystemId=fs_id, **targets[sid])
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0,
- self.wait_timeout
+ self.wait_timeout,
)
result = True
# If no security groups were passed into the module, then do not change it.
- security_groups_to_update = [sid for sid in intersection if
- 'SecurityGroups' in targets[sid] and
- current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']]
+ security_groups_to_update = [
+ sid
+ for sid in intersection
+ if "SecurityGroups" in targets[sid]
+ and current_targets[sid]["SecurityGroups"] != targets[sid]["SecurityGroups"]
+ ]
if security_groups_to_update:
for sid in security_groups_to_update:
self.connection.modify_mount_target_security_groups(
- MountTargetId=current_targets[sid]['MountTargetId'],
- SecurityGroups=targets[sid].get('SecurityGroups', None)
+ MountTargetId=current_targets[sid]["MountTargetId"],
+ SecurityGroups=targets[sid].get("SecurityGroups", None),
)
result = True
@@ -595,14 +599,14 @@ class EFSConnection(object):
def delete_file_system(self, name, file_system_id=None):
"""
- Removes EFS instance by id/name
+ Removes EFS instance by id/name
"""
result = False
state = self.get_file_system_state(name, file_system_id)
if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
wait_for(
lambda: self.get_file_system_state(name),
- self.STATE_AVAILABLE
+ self.STATE_AVAILABLE,
)
if not file_system_id:
file_system_id = self.get_file_system_id(name)
@@ -614,27 +618,27 @@ class EFSConnection(object):
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED,
- self.wait_timeout
+ self.wait_timeout,
)
return result
def delete_mount_targets(self, file_system_id):
"""
- Removes mount targets by EFS id
+ Removes mount targets by EFS id
"""
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
- 0
+ 0,
)
targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
for target in targets:
- self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
+ self.connection.delete_mount_target(MountTargetId=target["MountTargetId"])
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
- 0
+ 0,
)
return len(targets) > 0
@@ -642,7 +646,7 @@ class EFSConnection(object):
def iterate_all(attr, map_method, **kwargs):
"""
- Method creates iterator from result set
+ Method creates iterator from result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
@@ -651,11 +655,11 @@ def iterate_all(attr, map_method, **kwargs):
data = map_method(**args)
for elm in data[attr]:
yield elm
- if 'NextMarker' in data:
- args['Marker'] = data['Nextmarker']
+ if "NextMarker" in data:
+ args["Marker"] = data["Nextmarker"]
continue
break
- except is_boto3_error_code('ThrottlingException'):
+ except is_boto3_error_code("ThrottlingException"):
if wait < 600:
sleep(wait)
wait = wait * 2
@@ -666,7 +670,7 @@ def iterate_all(attr, map_method, **kwargs):
def targets_equal(keys, a, b):
"""
- Method compare two mount targets by specified attributes
+ Method compare two mount targets by specified attributes
"""
for key in keys:
if key in b and a[key] != b[key]:
@@ -677,7 +681,7 @@ def targets_equal(keys, a, b):
def dict_diff(dict1, dict2, by_key=False):
"""
- Helper method to calculate difference of two dictionaries
+ Helper method to calculate difference of two dictionaries
"""
keys1 = set(dict1.keys() if by_key else dict1.items())
keys2 = set(dict2.keys() if by_key else dict2.items())
@@ -689,7 +693,7 @@ def dict_diff(dict1, dict2, by_key=False):
def first_or_default(items, default=None):
"""
- Helper method to fetch first element of list (if exists)
+ Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
@@ -698,13 +702,13 @@ def first_or_default(items, default=None):
def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
"""
- Helper method to wait for desired value returned by callback method
+ Helper method to wait for desired value returned by callback method
"""
wait_start = timestamp()
while True:
if callback() != value:
if timeout != 0 and (timestamp() - wait_start > timeout):
- raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
+ raise RuntimeError("Wait timeout exceeded (" + str(timeout) + " sec)")
else:
sleep(5)
continue
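
The hunk cuts wait_for() off at the continue; the assumed tail is a break once the callback matches. A self-contained sketch of the full polling contract (toy callback, same 5-second poll interval):

```python
from time import sleep
from time import time as timestamp

def wait_for(callback, value, timeout=0):
    # Poll callback() every 5 seconds until it returns value;
    # timeout=0 means wait indefinitely.
    wait_start = timestamp()
    while True:
        if callback() != value:
            if timeout != 0 and (timestamp() - wait_start > timeout):
                raise RuntimeError(f"Wait timeout exceeded ({timeout} sec)")
            sleep(5)
            continue
        break  # assumed tail of the helper shown above

states = iter(["creating", "available"])
wait_for(lambda: next(states), "available", timeout=30)  # one 5s poll, then returns
```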
@@ -713,67 +717,82 @@ def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS
def main():
"""
- Module action handler
+ Module action handler
"""
argument_spec = dict(
encrypt=dict(required=False, type="bool", default=False),
- state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
- kms_key_id=dict(required=False, type='str', default=None),
- purge_tags=dict(default=True, type='bool'),
- id=dict(required=False, type='str', default=None),
- name=dict(required=False, type='str', default=None),
- tags=dict(required=False, type="dict", aliases=['resource_tags']),
- targets=dict(required=False, type="list", default=[], elements='dict'),
- performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
- transition_to_ia=dict(required=False, type='str', choices=["None", "7", "14", "30", "60", "90"], default=None),
- throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None),
- provisioned_throughput_in_mibps=dict(required=False, type='float'),
+ state=dict(required=False, type="str", choices=["present", "absent"], default="present"),
+ kms_key_id=dict(required=False, type="str", default=None),
+ purge_tags=dict(default=True, type="bool"),
+ id=dict(required=False, type="str", default=None),
+ name=dict(required=False, type="str", default=None),
+ tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+ targets=dict(required=False, type="list", default=[], elements="dict"),
+ performance_mode=dict(
+ required=False, type="str", choices=["general_purpose", "max_io"], default="general_purpose"
+ ),
+ transition_to_ia=dict(required=False, type="str", choices=["None", "7", "14", "30", "60", "90"], default=None),
+ throughput_mode=dict(required=False, type="str", choices=["bursting", "provisioned"], default=None),
+ provisioned_throughput_in_mibps=dict(required=False, type="float"),
wait=dict(required=False, type="bool", default=False),
- wait_timeout=dict(required=False, type="int", default=0)
+ wait_timeout=dict(required=False, type="int", default=0),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
connection = EFSConnection(module)
- name = module.params.get('name')
- fs_id = module.params.get('id')
- tags = module.params.get('tags')
+ name = module.params.get("name")
+ fs_id = module.params.get("id")
+ tags = module.params.get("tags")
target_translations = {
- 'ip_address': 'IpAddress',
- 'security_groups': 'SecurityGroups',
- 'subnet_id': 'SubnetId'
+ "ip_address": "IpAddress",
+ "security_groups": "SecurityGroups",
+ "subnet_id": "SubnetId",
}
- targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
+ targets = [
+ dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get("targets")
+ ]
performance_mode_translations = {
- 'general_purpose': 'generalPurpose',
- 'max_io': 'maxIO'
+ "general_purpose": "generalPurpose",
+ "max_io": "maxIO",
}
- encrypt = module.params.get('encrypt')
- kms_key_id = module.params.get('kms_key_id')
- performance_mode = performance_mode_translations[module.params.get('performance_mode')]
- purge_tags = module.params.get('purge_tags')
- transition_to_ia = module.params.get('transition_to_ia')
- throughput_mode = module.params.get('throughput_mode')
- provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps')
- state = str(module.params.get('state')).lower()
+ encrypt = module.params.get("encrypt")
+ kms_key_id = module.params.get("kms_key_id")
+ performance_mode = performance_mode_translations[module.params.get("performance_mode")]
+ purge_tags = module.params.get("purge_tags")
+ transition_to_ia = module.params.get("transition_to_ia")
+ throughput_mode = module.params.get("throughput_mode")
+ provisioned_throughput_in_mibps = module.params.get("provisioned_throughput_in_mibps")
+ state = str(module.params.get("state")).lower()
changed = False
- if state == 'present':
+ if state == "present":
if not name:
- module.fail_json(msg='Name parameter is required for create')
+ module.fail_json(msg="Name parameter is required for create")
- changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps)
+ changed = connection.create_file_system(
+ name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps
+ )
changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed
- changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets,
- throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed
+ changed = (
+ connection.converge_file_system(
+ name=name,
+ tags=tags,
+ purge_tags=purge_tags,
+ targets=targets,
+ throughput_mode=throughput_mode,
+ provisioned_throughput_in_mibps=provisioned_throughput_in_mibps,
+ )
+ or changed
+ )
if transition_to_ia:
changed |= connection.update_lifecycle_policy(name, transition_to_ia)
result = first_or_default(connection.get_file_systems(CreationToken=name))
- elif state == 'absent':
+ elif state == "absent":
if not name and not fs_id:
- module.fail_json(msg='Either name or id parameter is required for delete')
+ module.fail_json(msg="Either name or id parameter is required for delete")
changed = connection.delete_file_system(name, fs_id)
result = None
@@ -782,5 +801,5 @@ def main():
module.exit_json(changed=changed, efs=result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
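
The one behavioral fix in this file's iterate_all() is the NextMarker casing; in plain boto3 the same manual pagination loop looks like this (credentials and region assumed from the environment):

```python
import boto3

efs = boto3.client("efs")
kwargs = {}
file_systems = []
while True:
    page = efs.describe_file_systems(**kwargs)
    file_systems.extend(page["FileSystems"])
    if "NextMarker" in page:
        # The response's NextMarker becomes the next request's Marker.
        kwargs["Marker"] = page["NextMarker"]
        continue
    break
print([fs["FileSystemId"] for fs in file_systems])
```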
diff --git a/ansible_collections/community/aws/plugins/modules/efs_info.py b/ansible_collections/community/aws/plugins/modules/efs_info.py
index 5ef436f3c..3a170a391 100644
--- a/ansible_collections/community/aws/plugins/modules/efs_info.py
+++ b/ansible_collections/community/aws/plugins/modules/efs_info.py
@@ -1,21 +1,19 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: efs_info
version_added: 1.0.0
short_description: Get information about Amazon EFS file systems
description:
- - This module can be used to search Amazon EFS file systems.
- Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)!
+- This module can be used to search Amazon EFS file systems.
+ Note that the M(community.aws.efs_info) module no longer returns C(ansible_facts)!
author:
- - "Ryan Sydnor (@ryansydnor)"
+- "Ryan Sydnor (@ryansydnor)"
options:
name:
description:
@@ -39,13 +37,12 @@ options:
elements: str
default: []
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
+- amazon.aws.common.modules
+- amazon.aws.region.modules
- amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Find all existing efs
community.aws.efs_info:
register: result
@@ -58,17 +55,17 @@ EXAMPLES = r'''
- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
community.aws.efs_info:
tags:
- Name: myTestNameTag
+ Name: myTestNameTag
targets:
- - subnet-1a2b3c4d
- - sg-4d3c2b1a
+ - subnet-1a2b3c4d
+ - sg-4d3c2b1a
register: result
- ansible.builtin.debug:
msg: "{{ result['efs'] }}"
-'''
+"""
-RETURN = r'''
+RETURN = r"""
creation_time:
description: timestamp of creation date
returned: always
@@ -167,8 +164,7 @@ tags:
"name": "my-efs",
"key": "Value"
}
-
-'''
+"""
from collections import defaultdict
@@ -180,90 +176,94 @@ except ImportError:
from ansible.module_utils._text import to_native
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class EFSConnection(object):
- STATE_CREATING = 'creating'
- STATE_AVAILABLE = 'available'
- STATE_DELETING = 'deleting'
- STATE_DELETED = 'deleted'
+ STATE_CREATING = "creating"
+ STATE_AVAILABLE = "available"
+ STATE_DELETING = "deleting"
+ STATE_DELETED = "deleted"
def __init__(self, module):
try:
- self.connection = module.client('efs')
+ self.connection = module.client("efs")
self.module = module
except Exception as e:
- module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e))
+ module.fail_json(msg=f"Failed to connect to AWS: {to_native(e)}")
self.region = module.region
- @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"])
def list_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
- paginator = self.connection.get_paginator('describe_file_systems')
- return paginator.paginate(**kwargs).build_full_result()['FileSystems']
+ paginator = self.connection.get_paginator("describe_file_systems")
+ return paginator.paginate(**kwargs).build_full_result()["FileSystems"]
- @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"])
def get_tags(self, file_system_id):
"""
Returns tag list for selected instance of EFS
"""
- paginator = self.connection.get_paginator('describe_tags')
- return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags'])
+ paginator = self.connection.get_paginator("describe_tags")
+ return boto3_tag_list_to_ansible_dict(
+ paginator.paginate(FileSystemId=file_system_id).build_full_result()["Tags"]
+ )
- @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
+ @AWSRetry.exponential_backoff(catch_extra_error_codes=["ThrottlingException"])
def get_mount_targets(self, file_system_id):
"""
Returns mount targets for selected instance of EFS
"""
- paginator = self.connection.get_paginator('describe_mount_targets')
- return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets']
+ paginator = self.connection.get_paginator("describe_mount_targets")
+ return paginator.paginate(FileSystemId=file_system_id).build_full_result()["MountTargets"]
- @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException'])
+ @AWSRetry.jittered_backoff(catch_extra_error_codes=["ThrottlingException"])
def get_security_groups(self, mount_target_id):
"""
Returns security groups for selected instance of EFS
"""
- return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups']
+ return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)["SecurityGroups"]
def get_mount_targets_data(self, file_systems):
for item in file_systems:
- if item['life_cycle_state'] == self.STATE_AVAILABLE:
+ if item["life_cycle_state"] == self.STATE_AVAILABLE:
try:
- mount_targets = self.get_mount_targets(item['file_system_id'])
+ mount_targets = self.get_mount_targets(item["file_system_id"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS targets")
for mt in mount_targets:
- item['mount_targets'].append(camel_dict_to_snake_dict(mt))
+ item["mount_targets"].append(camel_dict_to_snake_dict(mt))
return file_systems
def get_security_groups_data(self, file_systems):
for item in file_systems:
- if item['life_cycle_state'] == self.STATE_AVAILABLE:
- for target in item['mount_targets']:
- if target['life_cycle_state'] == self.STATE_AVAILABLE:
+ if item["life_cycle_state"] == self.STATE_AVAILABLE:
+ for target in item["mount_targets"]:
+ if target["life_cycle_state"] == self.STATE_AVAILABLE:
try:
- target['security_groups'] = self.get_security_groups(target['mount_target_id'])
+ target["security_groups"] = self.get_security_groups(target["mount_target_id"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS security groups")
else:
- target['security_groups'] = []
+ target["security_groups"] = []
else:
- item['tags'] = {}
- item['mount_targets'] = []
+ item["tags"] = {}
+ item["mount_targets"] = []
return file_systems
def get_file_systems(self, file_system_id=None, creation_token=None):
kwargs = dict()
if file_system_id:
- kwargs['FileSystemId'] = file_system_id
+ kwargs["FileSystemId"] = file_system_id
if creation_token:
- kwargs['CreationToken'] = creation_token
+ kwargs["CreationToken"] = creation_token
try:
file_systems = self.list_file_systems(**kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
@@ -271,7 +271,7 @@ class EFSConnection(object):
results = list()
for item in file_systems:
- item['CreationTime'] = str(item['CreationTime'])
+ item["CreationTime"] = str(item["CreationTime"])
"""
In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it
AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose
@@ -279,18 +279,18 @@ class EFSConnection(object):
AWS documentation is available here:
U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html)
"""
- item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
- item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/"
+ item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/"
- if 'Timestamp' in item['SizeInBytes']:
- item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+ if "Timestamp" in item["SizeInBytes"]:
+ item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"])
result = camel_dict_to_snake_dict(item)
- result['tags'] = {}
- result['mount_targets'] = []
+ result["tags"] = {}
+ result["mount_targets"] = []
# Set tags *after* doing camel to snake
- if result['life_cycle_state'] == self.STATE_AVAILABLE:
+ if result["life_cycle_state"] == self.STATE_AVAILABLE:
try:
- result['tags'] = self.get_tags(result['file_system_id'])
+ result["tags"] = self.get_tags(result["file_system_id"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS tags")
results.append(result)
@@ -302,13 +302,14 @@ def prefix_to_attr(attr_id):
Helper method to convert ID prefix to mount target attribute
"""
attr_by_prefix = {
- 'fsmt-': 'mount_target_id',
- 'subnet-': 'subnet_id',
- 'eni-': 'network_interface_id',
- 'sg-': 'security_groups'
+ "fsmt-": "mount_target_id",
+ "subnet-": "subnet_id",
+ "eni-": "network_interface_id",
+ "sg-": "security_groups",
}
- return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items()
- if str(attr_id).startswith(prefix)], 'ip_address')
+ return first_or_default(
+ [attr_name for (prefix, attr_name) in attr_by_prefix.items() if str(attr_id).startswith(prefix)], "ip_address"
+ )
def first_or_default(items, default=None):
@@ -335,7 +336,7 @@ def has_targets(available, required):
Helper method to determine if mount target requested already exists
"""
grouped = group_list_of_dict(available)
- for (value, field) in required:
+ for value, field in required:
if field not in grouped or value not in grouped[field]:
return False
return True
@@ -358,35 +359,34 @@ def main():
"""
argument_spec = dict(
id=dict(),
- name=dict(aliases=['creation_token']),
+ name=dict(aliases=["creation_token"]),
tags=dict(type="dict", default={}),
- targets=dict(type="list", default=[], elements='str')
+ targets=dict(type="list", default=[], elements="str"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
connection = EFSConnection(module)
- name = module.params.get('name')
- fs_id = module.params.get('id')
- tags = module.params.get('tags')
- targets = module.params.get('targets')
+ name = module.params.get("name")
+ fs_id = module.params.get("id")
+ tags = module.params.get("tags")
+ targets = module.params.get("targets")
file_systems_info = connection.get_file_systems(fs_id, name)
if tags:
- file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)]
+ file_systems_info = [item for item in file_systems_info if has_tags(item["tags"], tags)]
file_systems_info = connection.get_mount_targets_data(file_systems_info)
file_systems_info = connection.get_security_groups_data(file_systems_info)
if targets:
targets = [(item, prefix_to_attr(item)) for item in targets]
- file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)]
+ file_systems_info = [item for item in file_systems_info if has_targets(item["mount_targets"], targets)]
module.exit_json(changed=False, efs=file_systems_info)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
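
efs_info avoids the hand-rolled Marker loop entirely: a boto3 paginator with build_full_result() collapses all pages, and the AWSRetry decorators above layer exponential backoff on ThrottlingException. The paginator core, as a sketch:

```python
import boto3

efs = boto3.client("efs")
paginator = efs.get_paginator("describe_file_systems")
all_file_systems = paginator.paginate().build_full_result()["FileSystems"]
print(len(all_file_systems))
```

Both approaches return the same data; the paginator simply delegates the Marker bookkeeping to botocore.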
diff --git a/ansible_collections/community/aws/plugins/modules/efs_tag.py b/ansible_collections/community/aws/plugins/modules/efs_tag.py
index 1529fa944..0f5143471 100644
--- a/ansible_collections/community/aws/plugins/modules/efs_tag.py
+++ b/ansible_collections/community/aws/plugins/modules/efs_tag.py
@@ -1,21 +1,17 @@
#!/usr/bin/python
-"""
-Copyright: (c) 2021, Milan Zink <zeten30@gmail.com>
-GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
+# -*- coding: utf-8 -*-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+# Copyright: (c) 2021, Milan Zink <zeten30@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: efs_tag
version_added: 2.0.0
short_description: create and remove tags on Amazon EFS resources
description:
- - Creates and removes tags for Amazon EFS resources.
- - Resources are referenced by their ID (filesystem or filesystem access point).
+ - Creates and removes tags for Amazon EFS resources.
+ - Resources are referenced by their ID (filesystem or filesystem access point).
author:
- Milan Zink (@zeten30)
options:
@@ -44,13 +40,12 @@ options:
type: bool
default: false
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Ensure tags are present on a resource
community.aws.efs_tag:
resource: fs-123456ab
@@ -71,7 +66,7 @@ EXAMPLES = r'''
resource: fsap-78945ff
state: absent
tags:
- Name: foo
+ Name: foo
purge_tags: true
- name: Remove all tags
@@ -80,9 +75,9 @@ EXAMPLES = r'''
state: absent
tags: {}
purge_tags: true
-'''
+"""
-RETURN = r'''
+RETURN = r"""
tags:
description: A dict containing the tags on the resource
returned: always
@@ -95,51 +90,56 @@ removed_tags:
description: A dict of tags that were removed from the resource
returned: If tags were removed
type: dict
-'''
+"""
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
# Handled by AnsibleAWSModule
pass
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags, AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
MAX_AWS_RETRIES = 10 # How many retries to perform when an API call is failing
WAIT_RETRY = 5 # how many seconds to wait between propagation status polls
def get_tags(efs, module, resource):
- '''
+ """
Get resource tags
- '''
+ """
try:
- return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)['Tags'])
+ return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)["Tags"])
except (BotoCoreError, ClientError) as get_tags_error:
- module.fail_json_aws(get_tags_error, msg='Failed to fetch tags for resource {0}'.format(resource))
+ module.fail_json_aws(get_tags_error, msg=f"Failed to fetch tags for resource {resource}")
def main():
- '''
+ """
MAIN
- '''
+ """
argument_spec = dict(
resource=dict(required=True),
- tags=dict(type='dict', required=True, aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent'])
+ tags=dict(type="dict", required=True, aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=False),
+ state=dict(default="present", choices=["present", "absent"]),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- resource = module.params['resource']
- tags = module.params['tags']
- state = module.params['state']
- purge_tags = module.params['purge_tags']
+ resource = module.params["resource"]
+ tags = module.params["tags"]
+ state = module.params["state"]
+ purge_tags = module.params["purge_tags"]
- result = {'changed': False}
+ result = {"changed": False}
- efs = module.client('efs', retry_decorator=AWSRetry.jittered_backoff())
+ efs = module.client("efs", retry_decorator=AWSRetry.jittered_backoff())
current_tags = get_tags(efs, module, resource)
@@ -147,7 +147,7 @@ def main():
remove_tags = {}
- if state == 'absent':
+ if state == "absent":
for key in tags:
if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
remove_tags[key] = current_tags[key]
@@ -156,28 +156,30 @@ def main():
remove_tags[key] = current_tags[key]
if remove_tags:
- result['changed'] = True
- result['removed_tags'] = remove_tags
+ result["changed"] = True
+ result["removed_tags"] = remove_tags
if not module.check_mode:
try:
efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys()))
except (BotoCoreError, ClientError) as remove_tag_error:
- module.fail_json_aws(remove_tag_error, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
+ module.fail_json_aws(
+ remove_tag_error, msg=f"Failed to remove tags {remove_tags} from resource {resource}"
+ )
- if state == 'present' and add_tags:
- result['changed'] = True
- result['added_tags'] = add_tags
+ if state == "present" and add_tags:
+ result["changed"] = True
+ result["added_tags"] = add_tags
current_tags.update(add_tags)
if not module.check_mode:
try:
tags = ansible_dict_to_boto3_tag_list(add_tags)
efs.tag_resource(aws_retry=True, ResourceId=resource, Tags=tags)
except (BotoCoreError, ClientError) as set_tag_error:
- module.fail_json_aws(set_tag_error, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
+ module.fail_json_aws(set_tag_error, msg=f"Failed to set tags {add_tags} on resource {resource}")
- result['tags'] = get_tags(efs, module, resource)
+ result["tags"] = get_tags(efs, module, resource)
module.exit_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
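
Before the next file, a short aside on the tagging semantics efs_tag applies above. The sketch below re-derives the add/remove plan with plain dicts: state=absent removes a key only when no value, or a matching value, was requested, and purge_tags drops everything unrequested. It is an illustration of those rules only; the module's present-state path actually delegates to compare_aws_tags.

# Plain-dict rendition of the tag reconciliation rules; illustration only.
def plan_tag_changes(current, requested, state, purge_tags):
    add, remove = {}, {}
    if state == "absent":
        for key, value in requested.items():
            # Remove when the key exists and either no value was given or
            # the stored value matches the requested one.
            if key in current and (value is None or current[key] == value):
                remove[key] = current[key]
    else:
        add = {key: value for key, value in requested.items() if current.get(key) != value}
    if purge_tags:
        for key in current:
            if key not in requested:
                remove[key] = current[key]
    return add, remove

add, remove = plan_tag_changes({"Name": "foo", "Env": "dev"}, {"Name": "foo"}, "absent", purge_tags=True)
assert add == {} and remove == {"Name": "foo", "Env": "dev"}
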
diff --git a/ansible_collections/community/aws/plugins/modules/eks_cluster.py b/ansible_collections/community/aws/plugins/modules/eks_cluster.py
index 18a5055e9..a445def55 100644
--- a/ansible_collections/community/aws/plugins/modules/eks_cluster.py
+++ b/ansible_collections/community/aws/plugins/modules/eks_cluster.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: eks_cluster
version_added: 1.0.0
@@ -63,13 +61,12 @@ options:
default: 1200
type: int
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create an EKS cluster
@@ -89,9 +86,9 @@ EXAMPLES = r'''
name: my_cluster
wait: true
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
arn:
description: ARN of the EKS cluster
returned: when state is present
@@ -163,42 +160,45 @@ version:
returned: when state is present
type: str
sample: '1.10'
-'''
-
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
-from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+"""
try:
- import botocore.exceptions
+ import botocore
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def ensure_present(client, module):
- name = module.params.get('name')
- subnets = module.params['subnets']
- groups = module.params['security_groups']
- wait = module.params.get('wait')
+ name = module.params.get("name")
+ subnets = module.params["subnets"]
+ groups = module.params["security_groups"]
+ wait = module.params.get("wait")
cluster = get_cluster(client, module)
try:
- ec2 = module.client('ec2')
- vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])['Subnets'][0]['VpcId']
+ ec2 = module.client("ec2")
+ vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])["Subnets"][0]["VpcId"]
groups = get_ec2_security_group_ids_from_names(groups, ec2, vpc_id)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Couldn't lookup security groups")
if cluster:
- if set(cluster['resourcesVpcConfig']['subnetIds']) != set(subnets):
+ if set(cluster["resourcesVpcConfig"]["subnetIds"]) != set(subnets):
module.fail_json(msg="Cannot modify subnets of existing cluster")
- if set(cluster['resourcesVpcConfig']['securityGroupIds']) != set(groups):
+ if set(cluster["resourcesVpcConfig"]["securityGroupIds"]) != set(groups):
module.fail_json(msg="Cannot modify security groups of existing cluster")
- if module.params.get('version') and module.params.get('version') != cluster['version']:
+ if module.params.get("version") and module.params.get("version") != cluster["version"]:
module.fail_json(msg="Cannot modify version of existing cluster")
if wait:
- wait_until(client, module, 'cluster_active')
+ wait_until(client, module, "cluster_active")
# Ensure that fields that are only available for active clusters are
# included in the returned value
cluster = get_cluster(client, module)
@@ -208,24 +208,23 @@ def ensure_present(client, module):
if module.check_mode:
module.exit_json(changed=True)
try:
- params = dict(name=name,
- roleArn=module.params['role_arn'],
- resourcesVpcConfig=dict(
- subnetIds=subnets,
- securityGroupIds=groups),
- )
- if module.params['version']:
- params['version'] = module.params['version']
- if module.params['tags']:
- params['tags'] = module.params['tags']
- cluster = client.create_cluster(**params)['cluster']
+ params = dict(
+ name=name,
+ roleArn=module.params["role_arn"],
+ resourcesVpcConfig=dict(subnetIds=subnets, securityGroupIds=groups),
+ )
+ if module.params["version"]:
+ params["version"] = module.params["version"]
+ if module.params["tags"]:
+ params["tags"] = module.params["tags"]
+ cluster = client.create_cluster(**params)["cluster"]
except botocore.exceptions.EndpointConnectionError as e:
- module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
+ module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS")
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't create cluster %s" % name)
+ module.fail_json_aws(e, msg=f"Couldn't create cluster {name}")
if wait:
- wait_until(client, module, 'cluster_active')
+ wait_until(client, module, "cluster_active")
# Ensure that fields that are only available for active clusters are
# included in the returned value
cluster = get_cluster(client, module)
@@ -234,44 +233,47 @@ def ensure_present(client, module):
def ensure_absent(client, module):
- name = module.params.get('name')
+ name = module.params.get("name")
existing = get_cluster(client, module)
- wait = module.params.get('wait')
+ wait = module.params.get("wait")
if not existing:
module.exit_json(changed=False)
if not module.check_mode:
try:
- client.delete_cluster(name=module.params['name'])
+ client.delete_cluster(name=module.params["name"])
except botocore.exceptions.EndpointConnectionError as e:
- module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
+ module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS")
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name)
+ module.fail_json_aws(e, msg=f"Couldn't delete cluster {name}")
if wait:
- wait_until(client, module, 'cluster_deleted')
+ wait_until(client, module, "cluster_deleted")
module.exit_json(changed=True)
def get_cluster(client, module):
- name = module.params.get('name')
+ name = module.params.get("name")
try:
- return client.describe_cluster(name=name)['cluster']
- except is_boto3_error_code('ResourceNotFoundException'):
+ return client.describe_cluster(name=name)["cluster"]
+ except is_boto3_error_code("ResourceNotFoundException"):
return None
except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except
- module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't get cluster %s" % name)
+ module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS")
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Couldn't get cluster {name}")
-def wait_until(client, module, waiter_name='cluster_active'):
- name = module.params.get('name')
- wait_timeout = module.params.get('wait_timeout')
+def wait_until(client, module, waiter_name="cluster_active"):
+ name = module.params.get("name")
+ wait_timeout = module.params.get("wait_timeout")
waiter = get_waiter(client, waiter_name)
attempts = 1 + int(wait_timeout / waiter.config.delay)
- waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts})
+ waiter.wait(name=name, WaiterConfig={"MaxAttempts": attempts})
def main():
@@ -279,27 +281,27 @@ def main():
name=dict(required=True),
version=dict(),
role_arn=dict(),
- subnets=dict(type='list', elements='str'),
- security_groups=dict(type='list', elements='str'),
- state=dict(choices=['absent', 'present'], default='present'),
- tags=dict(type='dict', required=False),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=1200, type='int')
+ subnets=dict(type="list", elements="str"),
+ security_groups=dict(type="list", elements="str"),
+ state=dict(choices=["absent", "present"], default="present"),
+ tags=dict(type="dict", required=False),
+ wait=dict(default=False, type="bool"),
+ wait_timeout=dict(default=1200, type="int"),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]],
+ required_if=[["state", "present", ["role_arn", "subnets", "security_groups"]]],
supports_check_mode=True,
)
- client = module.client('eks')
+ client = module.client("eks")
- if module.params.get('state') == 'present':
+ if module.params.get("state") == "present":
ensure_present(client, module)
else:
ensure_absent(client, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
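
The wait_until() helper reformatted above converts a second-based timeout into a botocore waiter attempt budget. A minimal sketch of that arithmetic, with illustrative numbers (in the module the poll interval comes from waiter.config.delay and the timeout from the wait_timeout parameter):

# Illustrative waiter budget arithmetic.
def max_attempts(wait_timeout, delay):
    # One initial attempt plus one per full poll interval in the budget.
    return 1 + int(wait_timeout / delay)

assert max_attempts(1200, 30) == 41  # default timeout with a 30s poll interval
assert max_attempts(45, 30) == 2     # partial intervals round down
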
diff --git a/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py b/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py
index d78cbbe2d..131f0651b 100644
--- a/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py
+++ b/ansible_collections/community/aws/plugins/modules/eks_fargate_profile.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2022 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: eks_fargate_profile
version_added: 4.0.0
@@ -68,14 +66,13 @@ options:
default: 1200
type: int
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create an EKS Fargate Profile
@@ -98,9 +95,9 @@ EXAMPLES = r'''
cluster_name: test_cluster
wait: true
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
fargate_profile_name:
description: Name of Fargate Profile.
returned: when state is present
@@ -164,74 +161,77 @@ status:
sample:
- CREATING
- ACTIVE
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+"""
try:
- import botocore.exceptions
+ import botocore
except ImportError:
pass
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def validate_tags(client, module, fargate_profile):
changed = False
- desired_tags = module.params.get('tags')
+ desired_tags = module.params.get("tags")
if desired_tags is None:
return False
try:
- existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags']
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags'))
+ existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile["fargateProfileArn"])["tags"]
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags"))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to list or compare tags for Fargate Profile %s' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to list or compare tags for Fargate Profile {module.params.get('name')}")
if tags_to_remove:
changed = True
if not module.check_mode:
try:
- client.untag_resource(resourceArn=fargate_profile['fargateProfileArn'], tagKeys=tags_to_remove)
+ client.untag_resource(resourceArn=fargate_profile["fargateProfileArn"], tagKeys=tags_to_remove)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}")
if tags_to_add:
changed = True
if not module.check_mode:
try:
- client.tag_resource(resourceArn=fargate_profile['fargateProfileArn'], tags=tags_to_add)
+ client.tag_resource(resourceArn=fargate_profile["fargateProfileArn"], tags=tags_to_add)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}")
return changed
def create_or_update_fargate_profile(client, module):
- name = module.params.get('name')
- subnets = module.params['subnets']
- role_arn = module.params['role_arn']
- cluster_name = module.params['cluster_name']
- selectors = module.params['selectors']
- tags = module.params['tags'] or {}
- wait = module.params.get('wait')
+ name = module.params.get("name")
+ subnets = module.params["subnets"]
+ role_arn = module.params["role_arn"]
+ cluster_name = module.params["cluster_name"]
+ selectors = module.params["selectors"]
+ tags = module.params["tags"] or {}
+ wait = module.params.get("wait")
fargate_profile = get_fargate_profile(client, module, name, cluster_name)
if fargate_profile:
changed = False
- if set(fargate_profile['podExecutionRoleArn']) != set(role_arn):
+ if set(fargate_profile["podExecutionRoleArn"]) != set(role_arn):
module.fail_json(msg="Cannot modify Execution Role")
- if set(fargate_profile['subnets']) != set(subnets):
+ if set(fargate_profile["subnets"]) != set(subnets):
module.fail_json(msg="Cannot modify Subnets")
- if fargate_profile['selectors'] != selectors:
+ if fargate_profile["selectors"] != selectors:
module.fail_json(msg="Cannot modify Selectors")
changed = validate_tags(client, module, fargate_profile)
if wait:
- wait_until(client, module, 'fargate_profile_active', name, cluster_name)
+ wait_until(client, module, "fargate_profile_active", name, cluster_name)
fargate_profile = get_fargate_profile(client, module, name, cluster_name)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(fargate_profile))
@@ -242,29 +242,30 @@ def create_or_update_fargate_profile(client, module):
check_profiles_status(client, module, cluster_name)
try:
- params = dict(fargateProfileName=name,
- podExecutionRoleArn=role_arn,
- subnets=subnets,
- clusterName=cluster_name,
- selectors=selectors,
- tags=tags
- )
+ params = dict(
+ fargateProfileName=name,
+ podExecutionRoleArn=role_arn,
+ subnets=subnets,
+ clusterName=cluster_name,
+ selectors=selectors,
+ tags=tags,
+ )
fargate_profile = client.create_fargate_profile(**params)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't create fargate profile %s" % name)
+ module.fail_json_aws(e, msg=f"Couldn't create fargate profile {name}")
if wait:
- wait_until(client, module, 'fargate_profile_active', name, cluster_name)
+ wait_until(client, module, "fargate_profile_active", name, cluster_name)
fargate_profile = get_fargate_profile(client, module, name, cluster_name)
module.exit_json(changed=True, **camel_dict_to_snake_dict(fargate_profile))
def delete_fargate_profile(client, module):
- name = module.params.get('name')
- cluster_name = module.params['cluster_name']
+ name = module.params.get("name")
+ cluster_name = module.params["cluster_name"]
existing = get_fargate_profile(client, module, name, cluster_name)
- wait = module.params.get('wait')
+ wait = module.params.get("wait")
if not existing or existing["status"] == "DELETING":
module.exit_json(changed=False)
@@ -273,20 +274,23 @@ def delete_fargate_profile(client, module):
try:
client.delete_fargate_profile(clusterName=cluster_name, fargateProfileName=name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't delete fargate profile %s" % name)
+ module.fail_json_aws(e, msg=f"Couldn't delete fargate profile {name}")
if wait:
- wait_until(client, module, 'fargate_profile_deleted', name, cluster_name)
+ wait_until(client, module, "fargate_profile_deleted", name, cluster_name)
module.exit_json(changed=True)
def get_fargate_profile(client, module, name, cluster_name):
try:
- return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)['fargateProfile']
- except is_boto3_error_code('ResourceNotFoundException'):
+ return client.describe_fargate_profile(clusterName=cluster_name, fargateProfileName=name)["fargateProfile"]
+ except is_boto3_error_code("ResourceNotFoundException"):
return None
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Couldn't get fargate profile")
@@ -297,20 +301,24 @@ def check_profiles_status(client, module, cluster_name):
for name in list_profiles["fargateProfileNames"]:
fargate_profile = get_fargate_profile(client, module, name, cluster_name)
- if fargate_profile["status"] == 'CREATING':
- wait_until(client, module, 'fargate_profile_active', fargate_profile["fargateProfileName"], cluster_name)
- elif fargate_profile["status"] == 'DELETING':
- wait_until(client, module, 'fargate_profile_deleted', fargate_profile["fargateProfileName"], cluster_name)
+ if fargate_profile["status"] == "CREATING":
+ wait_until(
+ client, module, "fargate_profile_active", fargate_profile["fargateProfileName"], cluster_name
+ )
+ elif fargate_profile["status"] == "DELETING":
+ wait_until(
+ client, module, "fargate_profile_deleted", fargate_profile["fargateProfileName"], cluster_name
+ )
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Couldn't not find EKS cluster")
def wait_until(client, module, waiter_name, name, cluster_name):
- wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = module.params.get("wait_timeout")
waiter = get_waiter(client, waiter_name)
attempts = 1 + int(wait_timeout / waiter.config.delay)
try:
- waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={'MaxAttempts': attempts})
+ waiter.wait(clusterName=cluster_name, fargateProfileName=name, WaiterConfig={"MaxAttempts": attempts})
except botocore.exceptions.WaiterError as e:
module.fail_json_aws(e, msg="An error occurred waiting")
@@ -320,34 +328,38 @@ def main():
name=dict(required=True),
cluster_name=dict(required=True),
role_arn=dict(),
- subnets=dict(type='list', elements='str'),
- selectors=dict(type='list', elements='dict', options=dict(
- namespace=dict(type='str'),
- labels=dict(type='dict', default={})
- )),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
- state=dict(choices=['absent', 'present'], default='present'),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=1200, type='int')
+ subnets=dict(type="list", elements="str"),
+ selectors=dict(
+ type="list",
+ elements="dict",
+ options=dict(
+ namespace=dict(type="str"),
+ labels=dict(type="dict", default={}),
+ ),
+ ),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ state=dict(choices=["absent", "present"], default="present"),
+ wait=dict(default=False, type="bool"),
+ wait_timeout=dict(default=1200, type="int"),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- required_if=[['state', 'present', ['role_arn', 'subnets', 'selectors']]],
+ required_if=[["state", "present", ["role_arn", "subnets", "selectors"]]],
supports_check_mode=True,
)
try:
- client = module.client('eks')
+ client = module.client("eks")
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Couldn't connect to AWS")
- if module.params.get('state') == 'present':
+ if module.params.get("state") == "present":
create_or_update_fargate_profile(client, module)
else:
delete_fargate_profile(client, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
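
validate_tags() above leans on amazon.aws's compare_aws_tags() to split the desired tag set into additions and removals. The following is a rough, standalone rendition of that contract for readers without the module_utils source at hand; it is not the amazon.aws implementation.

# Approximation of the compare_aws_tags() contract; illustration only.
def compare_tags(existing, desired, purge_tags):
    to_set = {key: value for key, value in desired.items() if existing.get(key) != value}
    to_remove = [key for key in existing if key not in desired] if purge_tags else []
    return to_set, to_remove

to_set, to_remove = compare_tags({"env": "dev", "team": "x"}, {"env": "prod"}, purge_tags=True)
assert to_set == {"env": "prod"} and to_remove == ["team"]
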
diff --git a/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py b/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py
index 78979afc2..f9bbb7857 100644
--- a/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py
+++ b/ansible_collections/community/aws/plugins/modules/eks_nodegroup.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2022 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: eks_nodegroup
version_added: 5.3.0
@@ -169,12 +167,11 @@ options:
default: 1200
type: int
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: create nodegroup
@@ -187,29 +184,29 @@ EXAMPLES = r'''
- subnet-qwerty123
- subnet-asdfg456
scaling_config:
- - min_size: 1
- - max_size: 2
- - desired_size: 1
+ min_size: 1
+ max_size: 2
+ desired_size: 1
disk_size: 20
instance_types: 't3.micro'
ami_type: 'AL2_x86_64'
labels:
- - 'teste': 'test'
+ 'teste': 'test'
taints:
- key: 'test'
value: 'test'
effect: 'NO_SCHEDULE'
- capacity_type: 'on_demand'
+ capacity_type: 'ON_DEMAND'
- name: Remove an EKS Nodegroup
community.aws.eks_nodegroup:
name: test_nodegroup
cluster_name: test_cluster
- wait: yes
+ wait: true
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
nodegroup_name:
description: The name associated with an Amazon EKS managed node group.
returned: when state is present
@@ -345,45 +342,49 @@ tags:
type: dict
sample:
foo: bar
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+"""
try:
- import botocore.exceptions
+ import botocore
except ImportError:
pass
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def validate_tags(client, module, nodegroup):
changed = False
- desired_tags = module.params.get('tags')
+ desired_tags = module.params.get("tags")
if desired_tags is None:
return False
try:
- existing_tags = client.list_tags_for_resource(resourceArn=nodegroup['nodegroupArn'])['tags']
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags'))
+ existing_tags = client.list_tags_for_resource(resourceArn=nodegroup["nodegroupArn"])["tags"]
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags"))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to list or compare tags for Nodegroup %s.' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to list or compare tags for Nodegroup {module.params.get('name')}.")
if tags_to_remove:
if not module.check_mode:
changed = True
try:
- client.untag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tagKeys=tags_to_remove)
+ client.untag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tagKeys=tags_to_remove)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.")
if tags_to_add:
if not module.check_mode:
changed = True
try:
- client.tag_resource(aws_retry=True, ResourceArn=nodegroup['nodegroupArn'], tags=tags_to_add)
+ client.tag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tags=tags_to_add)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for Nodegroup %s.' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.")
return changed
@@ -404,24 +405,24 @@ def compare_taints(nodegroup_taints, param_taints):
def validate_taints(client, module, nodegroup, param_taints):
changed = False
params = dict()
- params['clusterName'] = nodegroup['clusterName']
- params['nodegroupName'] = nodegroup['nodegroupName']
- params['taints'] = []
- if 'taints' not in nodegroup:
- nodegroup['taints'] = []
- taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup['taints'], param_taints)
+ params["clusterName"] = nodegroup["clusterName"]
+ params["nodegroupName"] = nodegroup["nodegroupName"]
+ params["taints"] = []
+ if "taints" not in nodegroup:
+ nodegroup["taints"] = []
+ taints_to_add_or_update, taints_to_unset = compare_taints(nodegroup["taints"], param_taints)
if taints_to_add_or_update:
- params['taints']['addOrUpdateTaints'] = taints_to_add_or_update
+ params["taints"]["addOrUpdateTaints"] = taints_to_add_or_update
if taints_to_unset:
- params['taints']['removeTaints'] = taints_to_unset
- if params['taints']:
+ params["taints"]["removeTaints"] = taints_to_unset
+ if params["taints"]:
if not module.check_mode:
changed = True
try:
client.update_nodegroup_config(**params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set taints for Nodegroup %s.' % params['nodegroupName'])
+ module.fail_json_aws(e, msg=f"Unable to set taints for Nodegroup {params['nodegroupName']}.")
return changed
@@ -442,109 +443,114 @@ def compare_labels(nodegroup_labels, param_labels):
def validate_labels(client, module, nodegroup, param_labels):
changed = False
params = dict()
- params['clusterName'] = nodegroup['clusterName']
- params['nodegroupName'] = nodegroup['nodegroupName']
- params['labels'] = {}
- labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup['labels'], param_labels)
+ params["clusterName"] = nodegroup["clusterName"]
+ params["nodegroupName"] = nodegroup["nodegroupName"]
+ params["labels"] = {}
+ labels_to_add_or_update, labels_to_unset = compare_labels(nodegroup["labels"], param_labels)
if labels_to_add_or_update:
- params['labels']['addOrUpdateLabels'] = labels_to_add_or_update
+ params["labels"]["addOrUpdateLabels"] = labels_to_add_or_update
if labels_to_unset:
- params['labels']['removeLabels'] = labels_to_unset
- if params['labels']:
+ params["labels"]["removeLabels"] = labels_to_unset
+ if params["labels"]:
if not module.check_mode:
changed = True
try:
client.update_nodegroup_config(**params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set labels for Nodegroup %s.' % params['nodegroupName'])
+ module.fail_json_aws(e, msg=f"Unable to set labels for Nodegroup {params['nodegroupName']}.")
return changed
def compare_params(module, params, nodegroup):
- for param in ['nodeRole', 'subnets', 'diskSize', 'instanceTypes', 'amiTypes', 'remoteAccess', 'capacityType']:
+ for param in ["nodeRole", "subnets", "diskSize", "instanceTypes", "amiTypes", "remoteAccess", "capacityType"]:
if (param in nodegroup) and (param in params):
- if (nodegroup[param] != params[param]):
- module.fail_json(msg="Cannot modify parameter %s." % param)
- if ('launchTemplate' not in nodegroup) and ('launchTemplate' in params):
+ if nodegroup[param] != params[param]:
+ module.fail_json(msg=f"Cannot modify parameter {param}.")
+ if ("launchTemplate" not in nodegroup) and ("launchTemplate" in params):
module.fail_json(msg="Cannot add Launch Template in this Nodegroup.")
- if nodegroup['updateConfig'] != params['updateConfig']:
+ if nodegroup["updateConfig"] != params["updateConfig"]:
return True
- if nodegroup['scalingConfig'] != params['scalingConfig']:
+ if nodegroup["scalingConfig"] != params["scalingConfig"]:
return True
return False
def compare_params_launch_template(module, params, nodegroup):
- if 'launchTemplate' not in params:
+ if "launchTemplate" not in params:
module.fail_json(msg="Cannot exclude Launch Template in this Nodegroup.")
else:
- for key in ['name', 'id']:
- if (key in params['launchTemplate']) and (params['launchTemplate'][key] != nodegroup['launchTemplate'][key]):
- module.fail_json(msg="Cannot modify Launch Template %s." % key)
- if ('version' in params['launchTemplate']) and (params['launchTemplate']['version'] != nodegroup['launchTemplate']['version']):
+ for key in ["name", "id"]:
+ if (key in params["launchTemplate"]) and (
+ params["launchTemplate"][key] != nodegroup["launchTemplate"][key]
+ ):
+ module.fail_json(msg=f"Cannot modify Launch Template {key}.")
+ if ("version" in params["launchTemplate"]) and (
+ params["launchTemplate"]["version"] != nodegroup["launchTemplate"]["version"]
+ ):
return True
return False
def create_or_update_nodegroups(client, module):
-
changed = False
params = dict()
- params['nodegroupName'] = module.params['name']
- params['clusterName'] = module.params['cluster_name']
- params['nodeRole'] = module.params['node_role']
- params['subnets'] = module.params['subnets']
- params['tags'] = module.params['tags'] or {}
- if module.params['ami_type'] is not None:
- params['amiType'] = module.params['ami_type']
- if module.params['disk_size'] is not None:
- params['diskSize'] = module.params['disk_size']
- if module.params['instance_types'] is not None:
- params['instanceTypes'] = module.params['instance_types']
- if module.params['launch_template'] is not None:
- params['launchTemplate'] = dict()
- if module.params['launch_template']['id'] is not None:
- params['launchTemplate']['id'] = module.params['launch_template']['id']
- if module.params['launch_template']['version'] is not None:
- params['launchTemplate']['version'] = module.params['launch_template']['version']
- if module.params['launch_template']['name'] is not None:
- params['launchTemplate']['name'] = module.params['launch_template']['name']
- if module.params['release_version'] is not None:
- params['releaseVersion'] = module.params['release_version']
- if module.params['remote_access'] is not None:
- params['remoteAccess'] = dict()
- if module.params['remote_access']['ec2_ssh_key'] is not None:
- params['remoteAccess']['ec2SshKey'] = module.params['remote_access']['ec2_ssh_key']
- if module.params['remote_access']['source_sg'] is not None:
- params['remoteAccess']['sourceSecurityGroups'] = module.params['remote_access']['source_sg']
- if module.params['capacity_type'] is not None:
- params['capacityType'] = module.params['capacity_type'].upper()
- if module.params['labels'] is not None:
- params['labels'] = module.params['labels']
- if module.params['taints'] is not None:
- params['taints'] = module.params['taints']
- if module.params['update_config'] is not None:
- params['updateConfig'] = dict()
- if module.params['update_config']['max_unavailable'] is not None:
- params['updateConfig']['maxUnavailable'] = module.params['update_config']['max_unavailable']
- if module.params['update_config']['max_unavailable_percentage'] is not None:
- params['updateConfig']['maxUnavailablePercentage'] = module.params['update_config']['max_unavailable_percentage']
- if module.params['scaling_config'] is not None:
- params['scalingConfig'] = snake_dict_to_camel_dict(module.params['scaling_config'])
-
- wait = module.params.get('wait')
- nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName'])
+ params["nodegroupName"] = module.params["name"]
+ params["clusterName"] = module.params["cluster_name"]
+ params["nodeRole"] = module.params["node_role"]
+ params["subnets"] = module.params["subnets"]
+ params["tags"] = module.params["tags"] or {}
+ if module.params["ami_type"] is not None:
+ params["amiType"] = module.params["ami_type"]
+ if module.params["disk_size"] is not None:
+ params["diskSize"] = module.params["disk_size"]
+ if module.params["instance_types"] is not None:
+ params["instanceTypes"] = module.params["instance_types"]
+ if module.params["launch_template"] is not None:
+ params["launchTemplate"] = dict()
+ if module.params["launch_template"]["id"] is not None:
+ params["launchTemplate"]["id"] = module.params["launch_template"]["id"]
+ if module.params["launch_template"]["version"] is not None:
+ params["launchTemplate"]["version"] = module.params["launch_template"]["version"]
+ if module.params["launch_template"]["name"] is not None:
+ params["launchTemplate"]["name"] = module.params["launch_template"]["name"]
+ if module.params["release_version"] is not None:
+ params["releaseVersion"] = module.params["release_version"]
+ if module.params["remote_access"] is not None:
+ params["remoteAccess"] = dict()
+ if module.params["remote_access"]["ec2_ssh_key"] is not None:
+ params["remoteAccess"]["ec2SshKey"] = module.params["remote_access"]["ec2_ssh_key"]
+ if module.params["remote_access"]["source_sg"] is not None:
+ params["remoteAccess"]["sourceSecurityGroups"] = module.params["remote_access"]["source_sg"]
+ if module.params["capacity_type"] is not None:
+ params["capacityType"] = module.params["capacity_type"].upper()
+ if module.params["labels"] is not None:
+ params["labels"] = module.params["labels"]
+ if module.params["taints"] is not None:
+ params["taints"] = module.params["taints"]
+ if module.params["update_config"] is not None:
+ params["updateConfig"] = dict()
+ if module.params["update_config"]["max_unavailable"] is not None:
+ params["updateConfig"]["maxUnavailable"] = module.params["update_config"]["max_unavailable"]
+ if module.params["update_config"]["max_unavailable_percentage"] is not None:
+ params["updateConfig"]["maxUnavailablePercentage"] = module.params["update_config"][
+ "max_unavailable_percentage"
+ ]
+ if module.params["scaling_config"] is not None:
+ params["scalingConfig"] = snake_dict_to_camel_dict(module.params["scaling_config"])
+
+ wait = module.params.get("wait")
+ nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"])
if nodegroup:
update_params = dict()
- update_params['clusterName'] = params['clusterName']
- update_params['nodegroupName'] = params['nodegroupName']
+ update_params["clusterName"] = params["clusterName"]
+ update_params["nodegroupName"] = params["nodegroupName"]
- if 'launchTemplate' in nodegroup:
+ if "launchTemplate" in nodegroup:
if compare_params_launch_template(module, params, nodegroup):
- update_params['launchTemplate'] = params['launchTemplate']
+ update_params["launchTemplate"] = params["launchTemplate"]
if not module.check_mode:
try:
client.update_nodegroup_version(**update_params)
@@ -554,10 +560,10 @@ def create_or_update_nodegroups(client, module):
if compare_params(module, params, nodegroup):
try:
- if 'launchTemplate' in update_params:
- update_params.pop('launchTemplate')
- update_params['scalingConfig'] = params['scalingConfig']
- update_params['updateConfig'] = params['updateConfig']
+ if "launchTemplate" in update_params:
+ update_params.pop("launchTemplate")
+ update_params["scalingConfig"] = params["scalingConfig"]
+ update_params["updateConfig"] = params["updateConfig"]
if not module.check_mode:
client.update_nodegroup_config(**update_params)
@@ -569,15 +575,15 @@ def create_or_update_nodegroups(client, module):
changed |= validate_tags(client, module, nodegroup)
- changed |= validate_labels(client, module, nodegroup, params['labels'])
+ changed |= validate_labels(client, module, nodegroup, params["labels"])
- if 'taints' in nodegroup:
- changed |= validate_taints(client, module, nodegroup, params['taints'])
+ if "taints" in nodegroup:
+ changed |= validate_taints(client, module, nodegroup, params["taints"])
if wait:
- wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName'])
+ wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"])
- nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName'])
+ nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"])
module.exit_json(changed=changed, **camel_dict_to_snake_dict(nodegroup))
@@ -587,127 +593,172 @@ def create_or_update_nodegroups(client, module):
try:
nodegroup = client.create_nodegroup(**params)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params['nodegroupName'])
+ module.fail_json_aws(e, msg=f"Couldn't create Nodegroup {params['nodegroupName']}.")
if wait:
- wait_until(client, module, 'nodegroup_active', params['nodegroupName'], params['clusterName'])
- nodegroup = get_nodegroup(client, module, params['nodegroupName'], params['clusterName'])
+ wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"])
+ nodegroup = get_nodegroup(client, module, params["nodegroupName"], params["clusterName"])
module.exit_json(changed=True, **camel_dict_to_snake_dict(nodegroup))
def delete_nodegroups(client, module):
- name = module.params.get('name')
- clusterName = module.params['cluster_name']
+ name = module.params.get("name")
+ clusterName = module.params["cluster_name"]
existing = get_nodegroup(client, module, name, clusterName)
- wait = module.params.get('wait')
- if not existing or existing['status'] == 'DELETING':
- module.exit_json(changed=False, msg='Nodegroup not exists or in DELETING status.')
- if not module.check_mode:
- try:
- client.delete_nodegroup(clusterName=clusterName, nodegroupName=name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't delete Nodegroup %s." % name)
+ wait = module.params.get("wait")
+ if not existing:
+ module.exit_json(changed=False, msg="Nodegroup '{name}' does not exist")
+
+ if existing["status"] == "DELETING":
if wait:
- wait_until(client, module, 'nodegroup_deleted', name, clusterName)
+ wait_until(client, module, "nodegroup_deleted", name, clusterName)
+ module.exit_json(changed=False, msg=f"Nodegroup '{name}' deletion complete")
+ module.exit_json(changed=False, msg=f"Nodegroup '{name}' already in DELETING state")
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Nodegroup '{name}' deletion would be started (check mode)")
+
+ try:
+ client.delete_nodegroup(clusterName=clusterName, nodegroupName=name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg=f"Couldn't delete Nodegroup '{name}'.")
+
+ if wait:
+ wait_until(client, module, "nodegroup_deleted", name, clusterName)
+ module.exit_json(changed=True, msg="Nodegroup '{name}' deletion complete")
- module.exit_json(changed=True)
+ module.exit_json(changed=True, msg="Nodegroup '{name}' deletion started")
def get_nodegroup(client, module, nodegroup_name, cluster_name):
try:
- return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)['nodegroup']
- except is_boto3_error_code('ResourceNotFoundException'):
+ return client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)["nodegroup"]
+ except is_boto3_error_code("ResourceNotFoundException"):
return None
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't get Nodegroup %s." % nodegroup_name)
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Couldn't get Nodegroup {nodegroup_name}.")
def wait_until(client, module, waiter_name, nodegroup_name, cluster_name):
- wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = module.params.get("wait_timeout")
waiter = get_waiter(client, waiter_name)
attempts = 1 + int(wait_timeout / waiter.config.delay)
try:
- waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={'MaxAttempts': attempts})
+ waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name, WaiterConfig={"MaxAttempts": attempts})
except botocore.exceptions.WaiterError as e:
module.fail_json_aws(e, msg="An error occurred waiting")
def main():
argument_spec = dict(
- name=dict(type='str', required=True),
- cluster_name=dict(type='str', required=True),
+ name=dict(type="str", required=True),
+ cluster_name=dict(type="str", required=True),
node_role=dict(),
- subnets=dict(type='list', elements='str'),
- scaling_config=dict(type='dict', default={'min_size': 1, 'max_size': 2, 'desired_size': 1}, options=dict(
- min_size=dict(type='int'),
- max_size=dict(type='int'),
- desired_size=dict(type='int')
- )),
- disk_size=dict(type='int'),
- instance_types=dict(type='list', elements='str'),
- ami_type=dict(choices=['AL2_x86_64', 'AL2_x86_64_GPU', 'AL2_ARM_64', 'CUSTOM', 'BOTTLEROCKET_ARM_64', 'BOTTLEROCKET_x86_64']),
- remote_access=dict(type='dict', options=dict(
- ec2_ssh_key=dict(no_log=True),
- source_sg=dict(type='list', elements='str')
- )),
- update_config=dict(type='dict', default={'max_unavailable': 1}, options=dict(
- max_unavailable=dict(type='int'),
- max_unavailable_percentage=dict(type='int')
- )),
- labels=dict(type='dict', default={}),
- taints=dict(type='list', elements='dict', default=[], options=dict(
- key=dict(type='str', no_log=False,),
- value=dict(type='str'),
- effect=dict(type='str', choices=['NO_SCHEDULE', 'NO_EXECUTE', 'PREFER_NO_SCHEDULE'])
- )),
- launch_template=dict(type='dict', options=dict(
- name=dict(type='str'),
- version=dict(type='str'),
- id=dict(type='str')
- )),
- capacity_type=dict(choices=['ON_DEMAND', 'SPOT'], default='ON_DEMAND'),
+ subnets=dict(type="list", elements="str"),
+ scaling_config=dict(
+ type="dict",
+ default={"min_size": 1, "max_size": 2, "desired_size": 1},
+ options=dict(
+ min_size=dict(type="int"),
+ max_size=dict(type="int"),
+ desired_size=dict(type="int"),
+ ),
+ ),
+ disk_size=dict(type="int"),
+ instance_types=dict(type="list", elements="str"),
+ ami_type=dict(
+ choices=[
+ "AL2_x86_64",
+ "AL2_x86_64_GPU",
+ "AL2_ARM_64",
+ "CUSTOM",
+ "BOTTLEROCKET_ARM_64",
+ "BOTTLEROCKET_x86_64",
+ ]
+ ),
+ remote_access=dict(
+ type="dict",
+ options=dict(
+ ec2_ssh_key=dict(no_log=True),
+ source_sg=dict(type="list", elements="str"),
+ ),
+ ),
+ update_config=dict(
+ type="dict",
+ default={"max_unavailable": 1},
+ options=dict(
+ max_unavailable=dict(type="int"),
+ max_unavailable_percentage=dict(type="int"),
+ ),
+ ),
+ labels=dict(type="dict", default={}),
+ taints=dict(
+ type="list",
+ elements="dict",
+ default=[],
+ options=dict(
+ key=dict(
+ type="str",
+ no_log=False,
+ ),
+ value=dict(type="str"),
+ effect=dict(type="str", choices=["NO_SCHEDULE", "NO_EXECUTE", "PREFER_NO_SCHEDULE"]),
+ ),
+ ),
+ launch_template=dict(
+ type="dict",
+ options=dict(
+ name=dict(type="str"),
+ version=dict(type="str"),
+ id=dict(type="str"),
+ ),
+ ),
+ capacity_type=dict(choices=["ON_DEMAND", "SPOT"], default="ON_DEMAND"),
release_version=dict(),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
- state=dict(choices=['absent', 'present'], default='present'),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=1200, type='int')
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ state=dict(choices=["absent", "present"], default="present"),
+ wait=dict(default=False, type="bool"),
+ wait_timeout=dict(default=1200, type="int"),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- required_if=[['state', 'present', ['node_role', 'subnets']]],
+ required_if=[["state", "present", ["node_role", "subnets"]]],
mutually_exclusive=[
- ('launch_template', 'instance_types'),
- ('launch_template', 'disk_size'),
- ('launch_template', 'remote_access'),
- ('launch_template', 'ami_type')
+ ("launch_template", "instance_types"),
+ ("launch_template", "disk_size"),
+ ("launch_template", "remote_access"),
+ ("launch_template", "ami_type"),
],
supports_check_mode=True,
)
- if module.params['launch_template'] is None:
- if module.params['disk_size'] is None:
- module.params['disk_size'] = 20
- if module.params['ami_type'] is None:
- module.params['ami_type'] = "AL2_x86_64"
- if module.params['instance_types'] is None:
- module.params['instance_types'] = ["t3.medium"]
+ if module.params["launch_template"] is None:
+ if module.params["disk_size"] is None:
+ module.params["disk_size"] = 20
+ if module.params["ami_type"] is None:
+ module.params["ami_type"] = "AL2_x86_64"
+ if module.params["instance_types"] is None:
+ module.params["instance_types"] = ["t3.medium"]
else:
- if (module.params['launch_template']['id'] is None) and (module.params['launch_template']['name'] is None):
- module.exit_json(changed=False, msg='To use launch_template, it is necessary to inform the id or name.')
+ if (module.params["launch_template"]["id"] is None) and (module.params["launch_template"]["name"] is None):
+ module.exit_json(changed=False, msg="To use launch_template, it is necessary to inform the id or name.")
try:
- client = module.client('eks')
+ client = module.client("eks")
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Couldn't connect to AWS.")
- if module.params.get('state') == 'present':
+ if module.params.get("state") == "present":
create_or_update_nodegroups(client, module)
else:
delete_nodegroups(client, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
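
The largest behavioural change in the eks_nodegroup diff is the rewritten delete flow: it now distinguishes an absent nodegroup, one already in DELETING, and check mode, instead of collapsing the first two cases into a single message. A condensed sketch of that decision table; plan_delete is an invented helper name, not part of the module.

# Condensed decision table for the rewritten delete flow above.
def plan_delete(existing_status, check_mode):
    """Return (changed, action) for a delete request."""
    if existing_status is None:
        return False, "absent"
    if existing_status == "DELETING":
        return False, "already-deleting"
    if check_mode:
        return True, "would-delete"
    return True, "delete"

assert plan_delete(None, check_mode=False) == (False, "absent")
assert plan_delete("DELETING", check_mode=False) == (False, "already-deleting")
assert plan_delete("ACTIVE", check_mode=True) == (True, "would-delete")
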
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache.py b/ansible_collections/community/aws/plugins/modules/elasticache.py
index 454baafe3..d45509cb6 100644
--- a/ansible_collections/community/aws/plugins/modules/elasticache.py
+++ b/ansible_collections/community/aws/plugins/modules/elasticache.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: elasticache
version_added: 1.0.0
@@ -15,7 +12,8 @@ short_description: Manage cache clusters in Amazon ElastiCache
description:
- Manage cache clusters in Amazon ElastiCache.
- Returns information about the specified cache cluster.
-author: "Jim Dalton (@jsdalton)"
+author:
+ - "Jim Dalton (@jsdalton)"
options:
state:
description:
@@ -97,15 +95,15 @@ options:
- Defaults to C(false).
type: bool
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
+RETURN = r""" # """
EXAMPLES = r"""
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
+# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Basic example
community.aws.elasticache:
@@ -113,7 +111,7 @@ EXAMPLES = r"""
state: present
engine: memcached
cache_engine_version: 1.4.14
- node_type: cache.m1.small
+ node_type: cache.m3.small
num_nodes: 1
cache_port: 11211
cache_security_groups:
@@ -130,8 +128,8 @@ EXAMPLES = r"""
community.aws.elasticache:
name: "test-please-delete"
state: rebooted
-
"""
+
from time import sleep
try:
@@ -139,21 +137,34 @@ try:
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-class ElastiCacheManager(object):
- """Handles elasticache creation and destruction"""
+class ElastiCacheManager:
- EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
+ """Handles elasticache creation and destruction"""
- def __init__(self, module, name, engine, cache_engine_version, node_type,
- num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
- cache_security_groups, security_group_ids, zone, wait,
- hard_modify, region, **aws_connect_kwargs):
+ EXIST_STATUSES = ["available", "creating", "rebooting", "modifying"]
+
+ def __init__(
+ self,
+ module,
+ name,
+ engine,
+ cache_engine_version,
+ node_type,
+ num_nodes,
+ cache_port,
+ cache_parameter_group,
+ cache_subnet_group,
+ cache_security_groups,
+ security_group_ids,
+ zone,
+ wait,
+ hard_modify,
+ ):
self.module = module
self.name = name
self.engine = engine.lower()
@@ -169,12 +180,9 @@ class ElastiCacheManager(object):
self.wait = wait
self.hard_modify = hard_modify
- self.region = region
- self.aws_connect_kwargs = aws_connect_kwargs
-
self.changed = False
self.data = None
- self.status = 'gone'
+ self.status = "gone"
self.conn = self._get_elasticache_connection()
self._refresh_data()
@@ -199,32 +207,33 @@ class ElastiCacheManager(object):
def create(self):
"""Create an ElastiCache cluster"""
- if self.status == 'available':
+ if self.status == "available":
return
- if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.status in ["creating", "rebooting", "modifying"]:
if self.wait:
- self._wait_for_status('available')
+ self._wait_for_status("available")
return
- if self.status == 'deleting':
+ if self.status == "deleting":
if self.wait:
- self._wait_for_status('gone')
+ self._wait_for_status("gone")
else:
- msg = "'%s' is currently deleting. Cannot create."
- self.module.fail_json(msg=msg % self.name)
-
- kwargs = dict(CacheClusterId=self.name,
- NumCacheNodes=self.num_nodes,
- CacheNodeType=self.node_type,
- Engine=self.engine,
- EngineVersion=self.cache_engine_version,
- CacheSecurityGroupNames=self.cache_security_groups,
- SecurityGroupIds=self.security_group_ids,
- CacheParameterGroupName=self.cache_parameter_group,
- CacheSubnetGroupName=self.cache_subnet_group)
+ self.module.fail_json(msg=f"'{self.name}' is currently deleting. Cannot create.")
+
+ kwargs = dict(
+ CacheClusterId=self.name,
+ NumCacheNodes=self.num_nodes,
+ CacheNodeType=self.node_type,
+ Engine=self.engine,
+ EngineVersion=self.cache_engine_version,
+ CacheSecurityGroupNames=self.cache_security_groups,
+ SecurityGroupIds=self.security_group_ids,
+ CacheParameterGroupName=self.cache_parameter_group,
+ CacheSubnetGroupName=self.cache_subnet_group,
+ )
if self.cache_port is not None:
- kwargs['Port'] = self.cache_port
+ kwargs["Port"] = self.cache_port
if self.zone is not None:
- kwargs['PreferredAvailabilityZone'] = self.zone
+ kwargs["PreferredAvailabilityZone"] = self.zone
try:
self.conn.create_cache_cluster(**kwargs)
@@ -236,45 +245,43 @@ class ElastiCacheManager(object):
self.changed = True
if self.wait:
- self._wait_for_status('available')
+ self._wait_for_status("available")
return True
def delete(self):
"""Destroy an ElastiCache cluster"""
- if self.status == 'gone':
+ if self.status == "gone":
return
- if self.status == 'deleting':
+ if self.status == "deleting":
if self.wait:
- self._wait_for_status('gone')
+ self._wait_for_status("gone")
return
- if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.status in ["creating", "rebooting", "modifying"]:
if self.wait:
- self._wait_for_status('available')
+ self._wait_for_status("available")
else:
- msg = "'%s' is currently %s. Cannot delete."
- self.module.fail_json(msg=msg % (self.name, self.status))
+ self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot delete.")
try:
response = self.conn.delete_cache_cluster(CacheClusterId=self.name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Failed to delete cache cluster")
- cache_cluster_data = response['CacheCluster']
+ cache_cluster_data = response["CacheCluster"]
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
- self._wait_for_status('gone')
+ self._wait_for_status("gone")
def sync(self):
"""Sync settings to cluster if required"""
if not self.exists():
- msg = "'%s' is %s. Cannot sync."
- self.module.fail_json(msg=msg % (self.name, self.status))
+ self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot sync.")
- if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.status in ["creating", "rebooting", "modifying"]:
if self.wait:
- self._wait_for_status('available')
+ self._wait_for_status("available")
else:
# Cluster can only be synced if available. If we can't wait
# for this, then just be done.
@@ -282,11 +289,13 @@ class ElastiCacheManager(object):
if self._requires_destroy_and_create():
if not self.hard_modify:
- msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
- self.module.fail_json(msg=msg % self.name)
+ self.module.fail_json(
+ msg=f"'{self.name}' requires destructive modification. 'hard_modify' must be set to true to proceed."
+ )
if not self.wait:
- msg = "'%s' requires destructive modification. 'wait' must be set to true."
- self.module.fail_json(msg=msg % self.name)
+ self.module.fail_json(
+ msg=f"'{self.name}' requires destructive modification. 'wait' must be set to true to proceed."
+ )
self.delete()
self.create()
return
@@ -298,14 +307,16 @@ class ElastiCacheManager(object):
"""Modify the cache cluster. Note it's only possible to modify a few select options."""
nodes_to_remove = self._get_nodes_to_remove()
try:
- self.conn.modify_cache_cluster(CacheClusterId=self.name,
- NumCacheNodes=self.num_nodes,
- CacheNodeIdsToRemove=nodes_to_remove,
- CacheSecurityGroupNames=self.cache_security_groups,
- CacheParameterGroupName=self.cache_parameter_group,
- SecurityGroupIds=self.security_group_ids,
- ApplyImmediately=True,
- EngineVersion=self.cache_engine_version)
+ self.conn.modify_cache_cluster(
+ CacheClusterId=self.name,
+ NumCacheNodes=self.num_nodes,
+ CacheNodeIdsToRemove=nodes_to_remove,
+ CacheSecurityGroupNames=self.cache_security_groups,
+ CacheParameterGroupName=self.cache_parameter_group,
+ SecurityGroupIds=self.security_group_ids,
+ ApplyImmediately=True,
+ EngineVersion=self.cache_engine_version,
+ )
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Failed to modify cache cluster")
@@ -313,27 +324,24 @@ class ElastiCacheManager(object):
self.changed = True
if self.wait:
- self._wait_for_status('available')
+ self._wait_for_status("available")
def reboot(self):
"""Reboot the cache cluster"""
if not self.exists():
- msg = "'%s' is %s. Cannot reboot."
- self.module.fail_json(msg=msg % (self.name, self.status))
- if self.status == 'rebooting':
+ self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot reboot.")
+ if self.status == "rebooting":
return
- if self.status in ['creating', 'modifying']:
+ if self.status in ["creating", "modifying"]:
if self.wait:
- self._wait_for_status('available')
+ self._wait_for_status("available")
else:
- msg = "'%s' is currently %s. Cannot reboot."
- self.module.fail_json(msg=msg % (self.name, self.status))
+ self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot reboot.")
# Collect ALL nodes for reboot
- cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+ cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]]
try:
- self.conn.reboot_cache_cluster(CacheClusterId=self.name,
- CacheNodeIdsToReboot=cache_node_ids)
+ self.conn.reboot_cache_cluster(CacheClusterId=self.name, CacheNodeIdsToReboot=cache_node_ids)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Failed to reboot cache cluster")
@@ -341,36 +349,28 @@ class ElastiCacheManager(object):
self.changed = True
if self.wait:
- self._wait_for_status('available')
+ self._wait_for_status("available")
def get_info(self):
"""Return basic info about the cache cluster"""
- info = {
- 'name': self.name,
- 'status': self.status
- }
+ info = {"name": self.name, "status": self.status}
if self.data:
- info['data'] = self.data
+ info["data"] = self.data
return info
def _wait_for_status(self, awaited_status):
"""Wait for status to change from present status to awaited_status"""
- status_map = {
- 'creating': 'available',
- 'rebooting': 'available',
- 'modifying': 'available',
- 'deleting': 'gone'
- }
+ status_map = {"creating": "available", "rebooting": "available", "modifying": "available", "deleting": "gone"}
if self.status == awaited_status:
# No need to wait, we're already done
return
if status_map[self.status] != awaited_status:
- msg = "Invalid awaited status. '%s' cannot transition to '%s'"
- self.module.fail_json(msg=msg % (self.status, awaited_status))
+ self.module.fail_json(
+ msg=f"Invalid awaited status. '{self.status}' cannot transition to '{awaited_status}'"
+ )
if awaited_status not in set(status_map.values()):
- msg = "'%s' is not a valid awaited status."
- self.module.fail_json(msg=msg % awaited_status)
+ self.module.fail_json(msg=f"'{awaited_status}' is not a valid awaited status.")
while True:
sleep(1)
@@ -381,27 +381,24 @@ class ElastiCacheManager(object):
def _requires_modification(self):
"""Check if cluster requires (nondestructive) modification"""
# Check modifiable data attributes
- modifiable_data = {
- 'NumCacheNodes': self.num_nodes,
- 'EngineVersion': self.cache_engine_version
- }
+ modifiable_data = {"NumCacheNodes": self.num_nodes, "EngineVersion": self.cache_engine_version}
for key, value in modifiable_data.items():
if value is not None and value and self.data[key] != value:
return True
# Check cache security groups
cache_security_groups = []
- for sg in self.data['CacheSecurityGroups']:
- cache_security_groups.append(sg['CacheSecurityGroupName'])
+ for sg in self.data["CacheSecurityGroups"]:
+ cache_security_groups.append(sg["CacheSecurityGroupName"])
if set(cache_security_groups) != set(self.cache_security_groups):
return True
# check vpc security groups
if self.security_group_ids:
vpc_security_groups = []
- security_groups = self.data.get('SecurityGroups', [])
+ security_groups = self.data.get("SecurityGroups", [])
for sg in security_groups:
- vpc_security_groups.append(sg['SecurityGroupId'])
+ vpc_security_groups.append(sg["SecurityGroupId"])
if set(vpc_security_groups) != set(self.security_group_ids):
return True
@@ -412,13 +409,13 @@ class ElastiCacheManager(object):
Check whether a destroy and create is required to synchronize cluster.
"""
unmodifiable_data = {
- 'node_type': self.data['CacheNodeType'],
- 'engine': self.data['Engine'],
- 'cache_port': self._get_port()
+ "node_type": self.data["CacheNodeType"],
+ "engine": self.data["Engine"],
+ "cache_port": self._get_port(),
}
# Only check for modifications if zone is specified
if self.zone is not None:
- unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
+ unmodifiable_data["zone"] = self.data["PreferredAvailabilityZone"]
for key, value in unmodifiable_data.items():
if getattr(self, key) is not None and getattr(self, key) != value:
return True
@@ -427,18 +424,18 @@ class ElastiCacheManager(object):
def _get_elasticache_connection(self):
"""Get an elasticache connection"""
try:
- return self.module.client('elasticache')
+ return self.module.client("elasticache")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Failed to connect to AWS')
+ self.module.fail_json_aws(e, msg="Failed to connect to AWS")
def _get_port(self):
"""Get the port. Where this information is retrieved from is engine dependent."""
- if self.data['Engine'] == 'memcached':
- return self.data['ConfigurationEndpoint']['Port']
- elif self.data['Engine'] == 'redis':
+ if self.data["Engine"] == "memcached":
+ return self.data["ConfigurationEndpoint"]["Port"]
+ elif self.data["Engine"] == "redis":
# Redis only supports a single node (presently) so just use
# the first and only
- return self.data['CacheNodes'][0]['Endpoint']['Port']
+ return self.data["CacheNodes"][0]["Endpoint"]["Port"]
def _refresh_data(self, cache_cluster_data=None):
"""Refresh data about this cache cluster"""
@@ -446,104 +443,110 @@ class ElastiCacheManager(object):
if cache_cluster_data is None:
try:
response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True)
- except is_boto3_error_code('CacheClusterNotFound'):
+ except is_boto3_error_code("CacheClusterNotFound"):
self.data = None
- self.status = 'gone'
+ self.status = "gone"
return
except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
self.module.fail_json_aws(e, msg="Failed to describe cache clusters")
- cache_cluster_data = response['CacheClusters'][0]
+ cache_cluster_data = response["CacheClusters"][0]
self.data = cache_cluster_data
- self.status = self.data['CacheClusterStatus']
+ self.status = self.data["CacheClusterStatus"]
# The documentation for elasticache lies -- status on rebooting is set
# to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
# here to make status checks etc. more sane.
- if self.status == 'rebooting cache cluster nodes':
- self.status = 'rebooting'
+ if self.status == "rebooting cache cluster nodes":
+ self.status = "rebooting"
def _get_nodes_to_remove(self):
"""If there are nodes to remove, it figures out which need to be removed"""
- num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
+ num_nodes_to_remove = self.data["NumCacheNodes"] - self.num_nodes
if num_nodes_to_remove <= 0:
return []
if not self.hard_modify:
- msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
- self.module.fail_json(msg=msg % self.name)
+ self.module.fail_json(
+ msg=f"'{self.name}' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
+ )
- cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+ cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]]
return cache_node_ids[-num_nodes_to_remove:]
def main():
- """ elasticache ansible module """
+ """elasticache ansible module"""
argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent', 'rebooted']),
+ state=dict(required=True, choices=["present", "absent", "rebooted"]),
name=dict(required=True),
- engine=dict(default='memcached'),
+ engine=dict(default="memcached"),
cache_engine_version=dict(default=""),
- node_type=dict(default='cache.t2.small'),
- num_nodes=dict(default=1, type='int'),
+ node_type=dict(default="cache.t2.small"),
+ num_nodes=dict(default=1, type="int"),
# alias for compat with the original PR 1950
- cache_parameter_group=dict(default="", aliases=['parameter_group']),
- cache_port=dict(type='int'),
+ cache_parameter_group=dict(default="", aliases=["parameter_group"]),
+ cache_port=dict(type="int"),
cache_subnet_group=dict(default=""),
- cache_security_groups=dict(default=[], type='list', elements='str'),
- security_group_ids=dict(default=[], type='list', elements='str'),
+ cache_security_groups=dict(default=[], type="list", elements="str"),
+ security_group_ids=dict(default=[], type="list", elements="str"),
zone=dict(),
- wait=dict(default=True, type='bool'),
- hard_modify=dict(type='bool'),
+ wait=dict(default=True, type="bool"),
+ hard_modify=dict(type="bool"),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
)
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
-
- name = module.params['name']
- state = module.params['state']
- engine = module.params['engine']
- cache_engine_version = module.params['cache_engine_version']
- node_type = module.params['node_type']
- num_nodes = module.params['num_nodes']
- cache_port = module.params['cache_port']
- cache_subnet_group = module.params['cache_subnet_group']
- cache_security_groups = module.params['cache_security_groups']
- security_group_ids = module.params['security_group_ids']
- zone = module.params['zone']
- wait = module.params['wait']
- hard_modify = module.params['hard_modify']
- cache_parameter_group = module.params['cache_parameter_group']
+ name = module.params["name"]
+ state = module.params["state"]
+ engine = module.params["engine"]
+ cache_engine_version = module.params["cache_engine_version"]
+ node_type = module.params["node_type"]
+ num_nodes = module.params["num_nodes"]
+ cache_port = module.params["cache_port"]
+ cache_subnet_group = module.params["cache_subnet_group"]
+ cache_security_groups = module.params["cache_security_groups"]
+ security_group_ids = module.params["security_group_ids"]
+ zone = module.params["zone"]
+ wait = module.params["wait"]
+ hard_modify = module.params["hard_modify"]
+ cache_parameter_group = module.params["cache_parameter_group"]
if cache_subnet_group and cache_security_groups:
module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
- if state == 'present' and not num_nodes:
+ if state == "present" and not num_nodes:
module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
- elasticache_manager = ElastiCacheManager(module, name, engine,
- cache_engine_version, node_type,
- num_nodes, cache_port,
- cache_parameter_group,
- cache_subnet_group,
- cache_security_groups,
- security_group_ids, zone, wait,
- hard_modify, region, **aws_connect_kwargs)
+ elasticache_manager = ElastiCacheManager(
+ module,
+ name,
+ engine,
+ cache_engine_version,
+ node_type,
+ num_nodes,
+ cache_port,
+ cache_parameter_group,
+ cache_subnet_group,
+ cache_security_groups,
+ security_group_ids,
+ zone,
+ wait,
+ hard_modify,
+ )
- if state == 'present':
+ if state == "present":
elasticache_manager.ensure_present()
- elif state == 'absent':
+ elif state == "absent":
elasticache_manager.ensure_absent()
- elif state == 'rebooted':
+ elif state == "rebooted":
elasticache_manager.ensure_rebooted()
- facts_result = dict(changed=elasticache_manager.changed,
- elasticache=elasticache_manager.get_info())
+ facts_result = dict(changed=elasticache_manager.changed, elasticache=elasticache_manager.get_info())
module.exit_json(**facts_result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
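
The _wait_for_status() loop above drives all of this module's wait=true behaviour: each transitional status is mapped to the status it settles into, then describe_cache_clusters is polled until the awaited status is reached. A minimal standalone sketch of that pattern, assuming boto3 credentials are configured; the cluster id, sleep interval, and timeout below are illustrative, not taken from the patch:

import time

import boto3


def wait_for_cluster_status(client, cluster_id, awaited_status, timeout=900):
    """Poll describe_cache_clusters until the cluster reports awaited_status."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        response = client.describe_cache_clusters(CacheClusterId=cluster_id, ShowCacheNodeInfo=True)
        status = response["CacheClusters"][0]["CacheClusterStatus"]
        # The API reports "rebooting cache cluster nodes" rather than plain
        # "rebooting"; normalize it the same way the module does.
        if status == "rebooting cache cluster nodes":
            status = "rebooting"
        if status == awaited_status:
            return status
        time.sleep(15)
    raise TimeoutError(f"'{cluster_id}' did not reach '{awaited_status}' within {timeout}s")


client = boto3.client("elasticache")
wait_for_cluster_status(client, "test-please-delete", "available")

Waiting for the "gone" pseudo-status would additionally need to catch the CacheClusterNotFound error that describe_cache_clusters raises once deletion completes, which is what the module's _refresh_data() handles via is_boto3_error_code().
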
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_info.py b/ansible_collections/community/aws/plugins/modules/elasticache_info.py
index f6c34629e..50a8cb5ff 100644
--- a/ansible_collections/community/aws/plugins/modules/elasticache_info.py
+++ b/ansible_collections/community/aws/plugins/modules/elasticache_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: elasticache_info
short_description: Retrieve information for AWS ElastiCache clusters
version_added: 1.0.0
@@ -20,21 +18,21 @@ options:
author:
- Will Thames (@willthames)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: obtain all ElastiCache information
community.aws.elasticache_info:
- name: obtain all information for a single ElastiCache cluster
community.aws.elasticache_info:
name: test_elasticache
-'''
+"""
-RETURN = '''
+RETURN = r"""
elasticache_clusters:
description: List of ElastiCache clusters.
returned: always
@@ -402,93 +400,82 @@ elasticache_clusters:
sample:
Application: web
Environment: test
-'''
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
@AWSRetry.exponential_backoff()
def describe_cache_clusters_with_backoff(client, cluster_id=None):
- paginator = client.get_paginator('describe_cache_clusters')
+ paginator = client.get_paginator("describe_cache_clusters")
params = dict(ShowCacheNodeInfo=True)
if cluster_id:
- params['CacheClusterId'] = cluster_id
+ params["CacheClusterId"] = cluster_id
try:
response = paginator.paginate(**params).build_full_result()
- except is_boto3_error_code('CacheClusterNotFound'):
+ except is_boto3_error_code("CacheClusterNotFound"):
return []
- return response['CacheClusters']
+ return response["CacheClusters"]
@AWSRetry.exponential_backoff()
def describe_replication_group_with_backoff(client, replication_group_id):
try:
response = client.describe_replication_groups(ReplicationGroupId=replication_group_id)
- except is_boto3_error_code('ReplicationGroupNotFoundFault'):
+ except is_boto3_error_code("ReplicationGroupNotFoundFault"):
return None
- return response['ReplicationGroups'][0]
+ return response["ReplicationGroups"][0]
@AWSRetry.exponential_backoff()
def get_elasticache_tags_with_backoff(client, cluster_id):
- return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
-
-
-def get_aws_account_id(module):
- try:
- client = module.client('sts')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Can't authorize connection")
-
- try:
- return client.get_caller_identity()['Account']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
+ return client.list_tags_for_resource(ResourceName=cluster_id)["TagList"]
def get_elasticache_clusters(client, module):
region = module.region
try:
- clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
+ clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get("name"))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")
- account_id = get_aws_account_id(module)
+ account_id, partition = get_aws_account_info(module)
results = []
for cluster in clusters:
-
cluster = camel_dict_to_snake_dict(cluster)
- arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
+ arn = f"arn:{partition}:elasticache:{region}:{account_id}:cluster:{cluster['cache_cluster_id']}"
try:
tags = get_elasticache_tags_with_backoff(client, arn)
except is_boto3_error_code("CacheClusterNotFound"):
# e.g: Cluster was listed but is in deleting state
continue
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")
+ module.fail_json_aws(e, msg=f"Couldn't get tags for cluster {cluster['cache_cluster_id']}")
- cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
+ cluster["tags"] = boto3_tag_list_to_ansible_dict(tags)
- if cluster.get('replication_group_id', None):
+ if cluster.get("replication_group_id", None):
try:
- replication_group = describe_replication_group_with_backoff(client, cluster['replication_group_id'])
+ replication_group = describe_replication_group_with_backoff(client, cluster["replication_group_id"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain replication group info")
if replication_group is not None:
replication_group = camel_dict_to_snake_dict(replication_group)
- cluster['replication_group'] = replication_group
+ cluster["replication_group"] = replication_group
results.append(cluster)
return results
@@ -500,10 +487,10 @@ def main():
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- client = module.client('elasticache')
+ client = module.client("elasticache")
module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
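
The main behavioural change in this info module is swapping the local get_aws_account_id() helper for get_aws_account_info(), which also returns the partition, so the generated cluster ARN is correct outside the default "aws" partition (GovCloud, China). A rough standalone equivalent using plain boto3, assuming STS access; the region is an illustrative default:

import boto3


def cluster_arns(region="us-east-1"):
    """Yield ElastiCache cluster ARNs, deriving account and partition from STS."""
    identity = boto3.client("sts", region_name=region).get_caller_identity()
    account_id = identity["Account"]
    # The partition ("aws", "aws-cn", "aws-us-gov", ...) is the second
    # colon-separated field of any ARN, including the caller's own.
    partition = identity["Arn"].split(":")[1]
    client = boto3.client("elasticache", region_name=region)
    for page in client.get_paginator("describe_cache_clusters").paginate(ShowCacheNodeInfo=True):
        for cluster in page["CacheClusters"]:
            yield f"arn:{partition}:elasticache:{region}:{account_id}:cluster:{cluster['CacheClusterId']}"


for arn in cluster_arns():
    print(arn)
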
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py b/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
index 247dd0bab..fa7f87a2f 100644
--- a/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
+++ b/ansible_collections/community/aws/plugins/modules/elasticache_parameter_group.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# -*- coding: utf-8 -*-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: elasticache_parameter_group
version_added: 1.0.0
@@ -14,11 +12,8 @@ short_description: Manage cache parameter groups in Amazon ElastiCache.
description:
  - Manage cache parameter groups in Amazon ElastiCache.
  - Returns information about the specified cache parameter group.
-author: "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+author:
+ - "Sloane Hertel (@s-hertel)"
options:
group_family:
@@ -47,13 +42,17 @@ options:
description:
- A user-specified dictionary of parameters to reset or modify for the cache parameter group.
type: dict
-'''
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
----
-- hosts: localhost
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Create, modify and delete a parameter group
+ hosts: localhost
connection: local
tasks:
- name: 'Create a test parameter group'
@@ -66,7 +65,7 @@ EXAMPLES = """
community.aws.elasticache_parameter_group:
name: 'test-param-group'
values:
- activerehashing: yes
+ activerehashing: true
client-output-buffer-limit-normal-hard-limit: 4
state: 'present'
- name: 'Reset all modifiable parameters for the test parameter group'
@@ -79,7 +78,7 @@ EXAMPLES = """
state: 'absent'
"""
-RETURN = """
+RETURN = r"""
elasticache:
description: cache parameter group information and response metadata
returned: always
@@ -115,13 +114,15 @@ from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.six import string_types
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def create(module, conn, name, group_family, description):
- """ Create ElastiCache parameter group. """
+ """Create ElastiCache parameter group."""
try:
- response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description)
+ response = conn.create_cache_parameter_group(
+ CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description
+ )
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Unable to create cache parameter group.")
@@ -129,7 +130,7 @@ def create(module, conn, name, group_family, description):
def delete(module, conn, name):
- """ Delete ElastiCache parameter group. """
+ """Delete ElastiCache parameter group."""
try:
conn.delete_cache_parameter_group(CacheParameterGroupName=name)
response = {}
@@ -140,10 +141,10 @@ def delete(module, conn, name):
def make_current_modifiable_param_dict(module, conn, name):
- """ Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}"""
+ """Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}"""
current_info = get_info(conn, name)
if current_info is False:
- module.fail_json(msg="Could not connect to the cache parameter group %s." % name)
+ module.fail_json(msg=f"Could not connect to the cache parameter group {name}.")
parameters = current_info["Parameters"]
modifiable_params = {}
@@ -157,7 +158,7 @@ def make_current_modifiable_param_dict(module, conn, name):
def check_valid_modification(module, values, modifiable_params):
- """ Check if the parameters and values in values are valid. """
+ """Check if the parameters and values in values are valid."""
changed_with_update = False
for parameter in values:
@@ -165,7 +166,9 @@ def check_valid_modification(module, values, modifiable_params):
# check valid modifiable parameters
if parameter not in modifiable_params:
- module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." % (parameter, modifiable_params.keys()))
+ module.fail_json(
+ msg=f"{parameter} is not a modifiable parameter. Valid parameters to modify are: {modifiable_params.keys()}."
+ )
# check allowed datatype for modified parameters
str_to_type = {"integer": int, "string": string_types}
@@ -180,18 +183,27 @@ def check_valid_modification(module, values, modifiable_params):
if isinstance(new_value, bool):
values[parameter] = 1 if new_value else 0
else:
- module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
- (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
+ module.fail_json(
+ msg=(
+ f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter"
+ f" {parameter}. Expected a type {modifiable_params[parameter][1]}."
+ )
+ )
else:
- module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
- (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
+ module.fail_json(
+ msg=(
+ f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter {parameter}."
+ f" Expected a type {modifiable_params[parameter][1]}."
+ )
+ )
# check allowed values for modifiable parameters
choices = modifiable_params[parameter][0]
if choices:
if not (to_text(new_value) in choices or isinstance(new_value, int)):
- module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." %
- (new_value, parameter, choices))
+ module.fail_json(
+ msg=f"{new_value} is not an allowed value for the parameter {parameter}. Valid parameters are: {choices}."
+ )
# check if a new value is different from current value
if to_text(values[parameter]) != modifiable_params[parameter][2]:
@@ -201,7 +213,7 @@ def check_valid_modification(module, values, modifiable_params):
def check_changed_parameter_values(values, old_parameters, new_parameters):
- """ Checking if the new values are different than the old values. """
+ """Checking if the new values are different than the old values."""
changed_with_update = False
# if the user specified parameters to reset, only check those for change
@@ -221,21 +233,23 @@ def check_changed_parameter_values(values, old_parameters, new_parameters):
def modify(module, conn, name, values):
- """ Modify ElastiCache parameter group to reflect the new information if it differs from the current. """
+ """Modify ElastiCache parameter group to reflect the new information if it differs from the current."""
    # compares current group parameters with the parameters we've specified to see if this will change the group
format_parameters = []
for key in values:
value = to_text(values[key])
- format_parameters.append({'ParameterName': key, 'ParameterValue': value})
+ format_parameters.append({"ParameterName": key, "ParameterValue": value})
try:
- response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters)
+ response = conn.modify_cache_parameter_group(
+ CacheParameterGroupName=name, ParameterNameValues=format_parameters
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Unable to modify cache parameter group.")
return response
def reset(module, conn, name, values):
- """ Reset ElastiCache parameter group if the current information is different from the new information. """
+ """Reset ElastiCache parameter group if the current information is different from the new information."""
# used to compare with the reset parameters' dict to see if there have been changes
old_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
@@ -247,12 +261,14 @@ def reset(module, conn, name, values):
format_parameters = []
for key in values:
value = to_text(values[key])
- format_parameters.append({'ParameterName': key, 'ParameterValue': value})
+ format_parameters.append({"ParameterName": key, "ParameterValue": value})
else:
all_parameters = True
try:
- response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters)
+ response = conn.reset_cache_parameter_group(
+ CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Unable to reset cache parameter group.")
@@ -264,7 +280,7 @@ def reset(module, conn, name, values):
def get_info(conn, name):
- """ Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access. """
+ """Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access."""
try:
data = conn.describe_cache_parameters(CacheParameterGroupName=name)
return data
@@ -274,36 +290,50 @@ def get_info(conn, name):
def main():
argument_spec = dict(
- group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0', 'redis6.x']),
- name=dict(required=True, type='str'),
- description=dict(default='', type='str'),
- state=dict(required=True, choices=['present', 'absent', 'reset']),
- values=dict(type='dict'),
+ group_family=dict(
+ type="str",
+ choices=[
+ "memcached1.4",
+ "memcached1.5",
+ "redis2.6",
+ "redis2.8",
+ "redis3.2",
+ "redis4.0",
+ "redis5.0",
+ "redis6.x",
+ ],
+ ),
+ name=dict(required=True, type="str"),
+ description=dict(default="", type="str"),
+ state=dict(required=True, choices=["present", "absent", "reset"]),
+ values=dict(type="dict"),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
- parameter_group_family = module.params.get('group_family')
- parameter_group_name = module.params.get('name')
- group_description = module.params.get('description')
- state = module.params.get('state')
- values = module.params.get('values')
+ parameter_group_family = module.params.get("group_family")
+ parameter_group_name = module.params.get("name")
+ group_description = module.params.get("description")
+ state = module.params.get("state")
+ values = module.params.get("values")
try:
- connection = module.client('elasticache')
+ connection = module.client("elasticache")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
exists = get_info(connection, parameter_group_name)
# check that the needed requirements are available
- if state == 'present' and not (exists or parameter_group_family):
+ if state == "present" and not (exists or parameter_group_family):
module.fail_json(msg="Creating a group requires a family group.")
- elif state == 'reset' and not exists:
- module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name)
+ elif state == "reset" and not exists:
+ module.fail_json(
+ msg=f"No group {parameter_group_name} to reset. Please create the group before using the state 'reset'."
+ )
# Taking action
changed = False
- if state == 'present':
+ if state == "present":
if exists:
# confirm that the group exists without any actions
if not values:
@@ -316,19 +346,21 @@ def main():
response = modify(module, connection, parameter_group_name, values)
# create group
else:
- response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description)
+ response, changed = create(
+ module, connection, parameter_group_name, parameter_group_family, group_description
+ )
if values:
modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
changed, values = check_valid_modification(module, values, modifiable_params)
response = modify(module, connection, parameter_group_name, values)
- elif state == 'absent':
+ elif state == "absent":
if exists:
# delete group
response, changed = delete(module, connection, parameter_group_name)
else:
response = {}
changed = False
- elif state == 'reset':
+ elif state == "reset":
response, changed = reset(module, connection, parameter_group_name, values)
facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response))
@@ -336,5 +368,5 @@ def main():
module.exit_json(**facts_result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
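
Under the reformatting, the modify/reset flow is unchanged: user-supplied values are stringified into ParameterName/ParameterValue pairs and sent in a single modify_cache_parameter_group call. A minimal sketch of that call shape, reusing the parameter names from the EXAMPLES block above; the group name and values are illustrative:

import boto3


def set_parameters(group_name, values):
    """Apply a dict of parameter overrides to a cache parameter group."""
    client = boto3.client("elasticache")
    parameter_name_values = [
        # The API expects string values, hence the coercion the module
        # performs with to_text().
        {"ParameterName": key, "ParameterValue": str(value)}
        for key, value in values.items()
    ]
    return client.modify_cache_parameter_group(
        CacheParameterGroupName=group_name,
        ParameterNameValues=parameter_name_values,
    )


set_parameters("test-param-group", {"activerehashing": "yes", "client-output-buffer-limit-normal-hard-limit": 4})
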
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py b/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py
index fa18b80c0..0816527fb 100644
--- a/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py
+++ b/ansible_collections/community/aws/plugins/modules/elasticache_snapshot.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: elasticache_snapshot
version_added: 1.0.0
@@ -14,11 +12,8 @@ short_description: Manage cache snapshots in Amazon ElastiCache
description:
- Manage cache snapshots in Amazon ElastiCache.
- Returns information about the specified snapshot.
-author: "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+author:
+ - "Sloane Hertel (@s-hertel)"
options:
name:
description:
@@ -47,11 +42,14 @@ options:
description:
- The s3 bucket to which the snapshot is exported.
type: str
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: 'Create a snapshot'
community.aws.elasticache_snapshot:
@@ -61,7 +59,7 @@ EXAMPLES = """
replication_id: '{{ replication }}'
"""
-RETURN = """
+RETURN = r"""
response_metadata:
description: response metadata about the snapshot
returned: always
@@ -117,18 +115,19 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def create(module, connection, replication_id, cluster_id, name):
- """ Create an ElastiCache backup. """
+ """Create an ElastiCache backup."""
try:
- response = connection.create_snapshot(ReplicationGroupId=replication_id,
- CacheClusterId=cluster_id,
- SnapshotName=name)
+ response = connection.create_snapshot(
+ ReplicationGroupId=replication_id, CacheClusterId=cluster_id, SnapshotName=name
+ )
changed = True
- except is_boto3_error_code('SnapshotAlreadyExistsFault'):
+ except is_boto3_error_code("SnapshotAlreadyExistsFault"):
response = {}
changed = False
except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
@@ -137,11 +136,9 @@ def create(module, connection, replication_id, cluster_id, name):
def copy(module, connection, name, target, bucket):
- """ Copy an ElastiCache backup. """
+ """Copy an ElastiCache backup."""
try:
- response = connection.copy_snapshot(SourceSnapshotName=name,
- TargetSnapshotName=target,
- TargetBucket=bucket)
+ response = connection.copy_snapshot(SourceSnapshotName=name, TargetSnapshotName=target, TargetBucket=bucket)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Unable to copy the snapshot.")
@@ -149,16 +146,20 @@ def copy(module, connection, name, target, bucket):
def delete(module, connection, name):
- """ Delete an ElastiCache backup. """
+ """Delete an ElastiCache backup."""
try:
response = connection.delete_snapshot(SnapshotName=name)
changed = True
- except is_boto3_error_code('SnapshotNotFoundFault'):
+ except is_boto3_error_code("SnapshotNotFoundFault"):
response = {}
changed = False
- except is_boto3_error_code('InvalidSnapshotState'): # pylint: disable=duplicate-except
- module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion."
- "You may need to wait a few minutes.")
+ except is_boto3_error_code("InvalidSnapshotState"): # pylint: disable=duplicate-except
+ module.fail_json(
+ msg=(
+ "Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow"
+                " deletion. You may need to wait a few minutes."
+ )
+ )
except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Unable to delete the snapshot.")
return response, changed
@@ -166,38 +167,38 @@ def delete(module, connection, name):
def main():
argument_spec = dict(
- name=dict(required=True, type='str'),
- state=dict(required=True, type='str', choices=['present', 'absent', 'copy']),
- replication_id=dict(type='str'),
- cluster_id=dict(type='str'),
- target=dict(type='str'),
- bucket=dict(type='str'),
+ name=dict(required=True, type="str"),
+ state=dict(required=True, type="str", choices=["present", "absent", "copy"]),
+ replication_id=dict(type="str"),
+ cluster_id=dict(type="str"),
+ target=dict(type="str"),
+ bucket=dict(type="str"),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
- name = module.params.get('name')
- state = module.params.get('state')
- replication_id = module.params.get('replication_id')
- cluster_id = module.params.get('cluster_id')
- target = module.params.get('target')
- bucket = module.params.get('bucket')
+ name = module.params.get("name")
+ state = module.params.get("state")
+ replication_id = module.params.get("replication_id")
+ cluster_id = module.params.get("cluster_id")
+ target = module.params.get("target")
+ bucket = module.params.get("bucket")
try:
- connection = module.client('elasticache')
+ connection = module.client("elasticache")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
changed = False
response = {}
- if state == 'present':
+ if state == "present":
if not all((replication_id, cluster_id)):
module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
response, changed = create(module, connection, replication_id, cluster_id, name)
- elif state == 'absent':
+ elif state == "absent":
response, changed = delete(module, connection, name)
- elif state == 'copy':
+ elif state == "copy":
if not all((target, bucket)):
module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.")
response, changed = copy(module, connection, name, target, bucket)
@@ -207,5 +208,5 @@ def main():
module.exit_json(**facts_result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
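
The snapshot module leans on is_boto3_error_code() so that "already exists" and "not found" outcomes are treated as unchanged results rather than failures. That helper is roughly equivalent to catching ClientError and inspecting the error code, as in this sketch; the cluster and snapshot names are illustrative:

import boto3
import botocore.exceptions


def create_snapshot_idempotently(cluster_id, snapshot_name):
    """Create a snapshot, treating an existing one as an unchanged no-op."""
    client = boto3.client("elasticache")
    try:
        response = client.create_snapshot(CacheClusterId=cluster_id, SnapshotName=snapshot_name)
        return response, True
    except botocore.exceptions.ClientError as e:
        # This is the code that is_boto3_error_code("SnapshotAlreadyExistsFault") matches on.
        if e.response["Error"]["Code"] == "SnapshotAlreadyExistsFault":
            return {}, False
        raise


response, changed = create_snapshot_idempotently("test-please-delete", "test-snapshot")
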
diff --git a/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py b/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py
index 0f5f5e75e..f7740e696 100644
--- a/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py
+++ b/ansible_collections/community/aws/plugins/modules/elasticache_subnet_group.py
@@ -1,18 +1,16 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: elasticache_subnet_group
version_added: 1.0.0
short_description: manage ElastiCache subnet groups
description:
- - Creates, modifies, and deletes ElastiCache subnet groups.
+ - Creates, modifies, and deletes ElastiCache subnet groups.
options:
state:
description:
@@ -40,12 +38,12 @@ options:
author:
- "Tim Mahoney (@timmahoney)"
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Add or change a subnet group
community.aws.elasticache_subnet_group:
state: present
@@ -59,9 +57,9 @@ EXAMPLES = r'''
community.aws.elasticache_subnet_group:
state: absent
name: norwegian-blue
-'''
+"""
-RETURN = r'''
+RETURN = r"""
cache_subnet_group:
description: Description of the Elasticache Subnet Group.
returned: always
@@ -95,7 +93,7 @@ cache_subnet_group:
sample:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
-'''
+"""
try:
import botocore
@@ -104,9 +102,10 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_subnet_group(name):
@@ -114,10 +113,13 @@ def get_subnet_group(name):
groups = client.describe_cache_subnet_groups(
aws_retry=True,
CacheSubnetGroupName=name,
- )['CacheSubnetGroups']
- except is_boto3_error_code('CacheSubnetGroupNotFoundFault'):
+ )["CacheSubnetGroups"]
+ except is_boto3_error_code("CacheSubnetGroupNotFoundFault"):
return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to describe subnet group")
if not groups:
@@ -131,19 +133,18 @@ def get_subnet_group(name):
subnet_group = camel_dict_to_snake_dict(groups[0])
- subnet_group['name'] = subnet_group['cache_subnet_group_name']
- subnet_group['description'] = subnet_group['cache_subnet_group_description']
+ subnet_group["name"] = subnet_group["cache_subnet_group_name"]
+ subnet_group["description"] = subnet_group["cache_subnet_group_description"]
- subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets'])
- subnet_group['subnet_ids'] = subnet_ids
+ subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"])
+ subnet_group["subnet_ids"] = subnet_ids
return subnet_group
def create_subnet_group(name, description, subnets):
-
if not subnets:
- module.fail_json(msg='At least one subnet must be provided when creating a subnet group')
+ module.fail_json(msg="At least one subnet must be provided when creating a subnet group")
if module.check_mode:
return True
@@ -164,13 +165,13 @@ def create_subnet_group(name, description, subnets):
def update_subnet_group(subnet_group, name, description, subnets):
update_params = dict()
- if description and subnet_group['description'] != description:
- update_params['CacheSubnetGroupDescription'] = description
+ if description and subnet_group["description"] != description:
+ update_params["CacheSubnetGroupDescription"] = description
if subnets:
- old_subnets = set(subnet_group['subnet_ids'])
+ old_subnets = set(subnet_group["subnet_ids"])
new_subnets = set(subnets)
if old_subnets != new_subnets:
- update_params['SubnetIds'] = list(subnets)
+ update_params["SubnetIds"] = list(subnets)
if not update_params:
return False
@@ -191,7 +192,6 @@ def update_subnet_group(subnet_group, name, description, subnets):
def delete_subnet_group(name):
-
if module.check_mode:
return True
@@ -201,20 +201,23 @@ def delete_subnet_group(name):
CacheSubnetGroupName=name,
)
return True
- except is_boto3_error_code('CacheSubnetGroupNotFoundFault'):
+ except is_boto3_error_code("CacheSubnetGroupNotFoundFault"):
# AWS is "eventually consistent", cope with the race conditions where
# deletion hadn't completed when we ran describe
return False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to delete subnet group")
def main():
argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
+ state=dict(default="present", choices=["present", "absent"]),
name=dict(required=True),
description=dict(required=False),
- subnets=dict(required=False, type='list', elements='str'),
+ subnets=dict(required=False, type="list", elements="str"),
)
global module
@@ -225,17 +228,17 @@ def main():
supports_check_mode=True,
)
- state = module.params.get('state')
- name = module.params.get('name').lower()
- description = module.params.get('description')
- subnets = module.params.get('subnets')
+ state = module.params.get("state")
+ name = module.params.get("name").lower()
+ description = module.params.get("description")
+ subnets = module.params.get("subnets")
- client = module.client('elasticache', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("elasticache", retry_decorator=AWSRetry.jittered_backoff())
subnet_group = get_subnet_group(name)
changed = False
- if state == 'present':
+ if state == "present":
if not subnet_group:
result = create_subnet_group(name, description, subnets)
changed |= result
@@ -252,5 +255,5 @@ def main():
module.exit_json(changed=changed, cache_subnet_group=subnet_group)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
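
The subnet-group module keeps its AWSRetry.jittered_backoff() decorator; outside Ansible, botocore's built-in retry configuration gives a comparable effect. A sketch under that assumption, with an illustrative group name; CacheSubnetGroupNotFoundFault is the modeled exception behind the module's is_boto3_error_code() check:

import boto3
from botocore.config import Config

# "adaptive" retry mode retries throttling errors with client-side rate
# limiting, approximating the module's jittered backoff decorator.
client = boto3.client("elasticache", config=Config(retries={"max_attempts": 10, "mode": "adaptive"}))


def get_subnet_ids(name):
    """Return the subnet ids in a cache subnet group, or None if it is absent."""
    try:
        groups = client.describe_cache_subnet_groups(CacheSubnetGroupName=name)["CacheSubnetGroups"]
    except client.exceptions.CacheSubnetGroupNotFoundFault:
        return None
    return [subnet["SubnetIdentifier"] for subnet in groups[0]["Subnets"]]


print(get_subnet_ids("norwegian-blue"))
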
diff --git a/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py b/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py
index b5b32c178..1aaa4c4d8 100644
--- a/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py
+++ b/ansible_collections/community/aws/plugins/modules/elasticbeanstalk_app.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: elasticbeanstalk_app
version_added: 1.0.0
@@ -43,12 +41,12 @@ author:
- Harpreet Singh (@hsingh)
- Stephen Granger (@viper233)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Create or update an application
- community.aws.elasticbeanstalk_app:
app_name: Sample_App
@@ -59,10 +57,9 @@ EXAMPLES = '''
- community.aws.elasticbeanstalk_app:
app_name: Sample_App
state: absent
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
app:
description: Beanstalk application.
returned: always
@@ -83,15 +80,16 @@ output:
returned: in check mode
type: str
sample: App is up-to-date
-'''
+"""
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def describe_app(ebs, app_name, module):
@@ -113,24 +111,24 @@ def list_apps(ebs, app_name, module):
def check_app(ebs, app, module):
- app_name = module.params['app_name']
- description = module.params['description']
- state = module.params['state']
- terminate_by_force = module.params['terminate_by_force']
+ app_name = module.params["app_name"]
+ description = module.params["description"]
+ state = module.params["state"]
+ terminate_by_force = module.params["terminate_by_force"]
result = {}
- if state == 'present' and app is None:
+ if state == "present" and app is None:
result = dict(changed=True, output="App would be created")
- elif state == 'present' and app.get("Description", None) != description:
+ elif state == "present" and app.get("Description", None) != description:
result = dict(changed=True, output="App would be updated", app=app)
- elif state == 'present' and app.get("Description", None) == description:
+ elif state == "present" and app.get("Description", None) == description:
result = dict(changed=False, output="App is up-to-date", app=app)
- elif state == 'absent' and app is None:
+ elif state == "absent" and app is None:
result = dict(changed=False, output="App does not exist", app={})
- elif state == 'absent' and app is not None:
+ elif state == "absent" and app is not None:
result = dict(changed=True, output="App will be deleted", app=app)
- elif state == 'absent' and app is not None and terminate_by_force is True:
+ elif state == "absent" and app is not None and terminate_by_force is True:
result = dict(changed=True, output="Running environments terminated before the App will be deleted", app=app)
module.exit_json(**result)
@@ -146,37 +144,36 @@ def filter_empty(**kwargs):
def main():
argument_spec = dict(
- app_name=dict(aliases=['name'], type='str', required=False),
+ app_name=dict(aliases=["name"], type="str", required=False),
description=dict(),
- state=dict(choices=['present', 'absent'], default='present'),
- terminate_by_force=dict(type='bool', default=False, required=False)
+ state=dict(choices=["present", "absent"], default="present"),
+ terminate_by_force=dict(type="bool", default=False, required=False),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- app_name = module.params['app_name']
- description = module.params['description']
- state = module.params['state']
- terminate_by_force = module.params['terminate_by_force']
+ app_name = module.params["app_name"]
+ description = module.params["description"]
+ state = module.params["state"]
+ terminate_by_force = module.params["terminate_by_force"]
if app_name is None:
module.fail_json(msg='Module parameter "app_name" is required')
result = {}
- ebs = module.client('elasticbeanstalk')
+ ebs = module.client("elasticbeanstalk")
app = describe_app(ebs, app_name, module)
if module.check_mode:
check_app(ebs, app, module)
- module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.')
+ module.fail_json(msg="ASSERTION FAILURE: check_app() should not return control.")
- if state == 'present':
+ if state == "present":
if app is None:
try:
- create_app = ebs.create_application(**filter_empty(ApplicationName=app_name,
- Description=description))
+ create_app = ebs.create_application(**filter_empty(ApplicationName=app_name, Description=description))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not create application")
@@ -201,7 +198,7 @@ def main():
else:
if app is None:
- result = dict(changed=False, output='Application not found', app={})
+ result = dict(changed=False, output="Application not found", app={})
else:
try:
if terminate_by_force:
@@ -210,9 +207,12 @@ def main():
else:
ebs.delete_application(ApplicationName=app_name)
changed = True
- except is_boto3_error_message('It is currently pending deletion'):
+ except is_boto3_error_message("It is currently pending deletion"):
changed = False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Cannot terminate app")
result = dict(changed=changed, app=app)
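The `except is_boto3_error_message("It is currently pending deletion"):` construct above works because the helper returns an exception *class*, chosen while the exception is in flight. A minimal sketch of the idea (the real helper in amazon.aws module_utils also accepts lists and an explicit exception argument):

import sys

import botocore.exceptions

def is_boto3_error_message(msg):
    # Look at the exception currently being handled; if it is a ClientError
    # whose error message contains `msg`, return ClientError so the except
    # clause matches. Otherwise return a class that nothing ever raises.
    err = sys.exc_info()[1]
    if isinstance(err, botocore.exceptions.ClientError) and msg in err.response["Error"]["Message"]:
        return botocore.exceptions.ClientError
    return type("NeverRaised", (Exception,), {})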
@@ -220,5 +220,5 @@ def main():
module.exit_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
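The reformatted create_application() call above still funnels its kwargs through filter_empty(), whose body sits outside this diff's context. A minimal sketch of such a helper, assuming it simply drops None-valued keyword arguments so optional parameters like Description are omitted from the boto3 call rather than sent as null:

def filter_empty(**kwargs):
    # Keep only the kwargs that were actually provided.
    return {k: v for k, v in kwargs.items() if v is not None}

# filter_empty(ApplicationName="demo-app", Description=None)
# -> {"ApplicationName": "demo-app"}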
diff --git a/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py b/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py
index 4cbeb9589..5329e5b81 100644
--- a/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py
+++ b/ansible_collections/community/aws/plugins/modules/elb_classic_lb_info.py
@@ -1,29 +1,16 @@
#!/usr/bin/python
-#
-# This is a free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This Ansible library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this library. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
---
module: elb_classic_lb_info
version_added: 1.0.0
short_description: Gather information about EC2 Elastic Load Balancers in AWS
description:
- - Gather information about EC2 Elastic Load Balancers in AWS
+ - Gather information about EC2 Elastic Load Balancers in AWS
author:
- "Michael Schultz (@mjschultz)"
- "Fernando Jose Pando (@nand0p)"
@@ -35,12 +22,12 @@ options:
elements: str
default: []
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match amazon.aws.ec2_elb_lb module input parameters
@@ -63,17 +50,16 @@ EXAMPLES = r'''
# Gather information about a set of ELBs
- community.aws.elb_classic_lb_info:
names:
- - frontend-prod-elb
- - backend-prod-elb
+ - frontend-prod-elb
+ - backend-prod-elb
register: elb_info
- ansible.builtin.debug:
msg: "{{ item.dns_name }}"
loop: "{{ elb_info.elbs }}"
+"""
-'''
-
-RETURN = r'''
+RETURN = r"""
elbs:
description: a list of load balancers
returned: always
@@ -137,20 +123,21 @@ elbs:
- subnet-XXXXXXXX
tags: {}
vpc_id: vpc-c248fda4
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
- AWSRetry,
- camel_dict_to_snake_dict,
- boto3_tag_list_to_ansible_dict
-)
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
MAX_AWS_RETRIES = 5
MAX_AWS_DELAY = 5
@@ -172,63 +159,79 @@ def list_elbs(connection, load_balancer_names):
def describe_elb(connection, lb):
description = camel_dict_to_snake_dict(lb)
- name = lb['LoadBalancerName']
- instances = lb.get('Instances', [])
- description['tags'] = get_tags(connection, name)
- description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService')
- description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService')
- description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown')
- description['attributes'] = get_lb_attributes(connection, name)
+ name = lb["LoadBalancerName"]
+ instances = lb.get("Instances", [])
+ description["tags"] = get_tags(connection, name)
+ description["instances_inservice"], description["instances_inservice_count"] = lb_instance_health(
+ connection, name, instances, "InService"
+ )
+ description["instances_outofservice"], description["instances_outofservice_count"] = lb_instance_health(
+ connection, name, instances, "OutOfService"
+ )
+ description["instances_unknownservice"], description["instances_unknownservice_count"] = lb_instance_health(
+ connection, name, instances, "Unknown"
+ )
+ description["attributes"] = get_lb_attributes(connection, name)
return description
@AWSRetry.jittered_backoff()
def get_all_lb(connection):
- paginator = connection.get_paginator('describe_load_balancers')
- return paginator.paginate().build_full_result()['LoadBalancerDescriptions']
+ paginator = connection.get_paginator("describe_load_balancers")
+ return paginator.paginate().build_full_result()["LoadBalancerDescriptions"]
def get_lb(connection, load_balancer_name):
try:
- return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])['LoadBalancerDescriptions'][0]
- except is_boto3_error_code('LoadBalancerNotFound'):
+ return connection.describe_load_balancers(aws_retry=True, LoadBalancerNames=[load_balancer_name])[
+ "LoadBalancerDescriptions"
+ ][0]
+ except is_boto3_error_code("LoadBalancerNotFound"):
return []
def get_lb_attributes(connection, load_balancer_name):
- attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get('LoadBalancerAttributes', {})
+ attributes = connection.describe_load_balancer_attributes(aws_retry=True, LoadBalancerName=load_balancer_name).get(
+ "LoadBalancerAttributes", {}
+ )
return camel_dict_to_snake_dict(attributes)
def get_tags(connection, load_balancer_name):
- tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])['TagDescriptions']
+ tags = connection.describe_tags(aws_retry=True, LoadBalancerNames=[load_balancer_name])["TagDescriptions"]
if not tags:
return {}
- return boto3_tag_list_to_ansible_dict(tags[0]['Tags'])
+ return boto3_tag_list_to_ansible_dict(tags[0]["Tags"])
def lb_instance_health(connection, load_balancer_name, instances, state):
- instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get('InstanceStates', [])
- instate = [instance['InstanceId'] for instance in instance_states if instance['State'] == state]
+ instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get(
+ "InstanceStates", []
+ )
+ instate = [instance["InstanceId"] for instance in instance_states if instance["State"] == state]
return instate, len(instate)
def main():
argument_spec = dict(
- names=dict(default=[], type='list', elements='str')
+ names=dict(default=[], type="list", elements="str"),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True)
- connection = module.client('elb', retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY))
+ connection = module.client(
+ "elb", retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY)
+ )
try:
- elbs = list_elbs(connection, module.params.get('names'))
+ elbs = list_elbs(connection, module.params.get("names"))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get load balancer information.")
module.exit_json(elbs=elbs)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
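The get_all_lb() change above keeps the collection's usual pattern for listing resources: a boto3 paginator wrapped in AWSRetry.jittered_backoff so throttling errors are retried with jitter. A standalone sketch of the same pattern, assuming working AWS credentials and the amazon.aws collection on the import path:

import boto3

from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

@AWSRetry.jittered_backoff(retries=5, delay=5)
def get_all_classic_lbs(client):
    # build_full_result() walks every page and merges the list keys.
    paginator = client.get_paginator("describe_load_balancers")
    return paginator.paginate().build_full_result()["LoadBalancerDescriptions"]

client = boto3.client("elb")
for lb in get_all_classic_lbs(client):
    print(lb["LoadBalancerName"])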
diff --git a/ansible_collections/community/aws/plugins/modules/elb_instance.py b/ansible_collections/community/aws/plugins/modules/elb_instance.py
index ecea32a63..6489a86bc 100644
--- a/ansible_collections/community/aws/plugins/modules/elb_instance.py
+++ b/ansible_collections/community/aws/plugins/modules/elb_instance.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: elb_instance
version_added: 1.0.0
@@ -15,7 +13,8 @@ description:
- This module de-registers or registers an AWS EC2 instance from the ELBs
that it belongs to.
- Will be marked changed when called only if there are ELBs found to operate on.
-author: "John Jarvis (@jarv)"
+author:
+ - "John Jarvis (@jarv)"
options:
state:
description:
@@ -55,13 +54,13 @@ options:
default: 0
type: int
notes:
-- The ec2_elbs fact previously set by this module was deprecated in release 2.1.0 and since release
- 4.0.0 is no longer set.
+ - The ec2_elbs fact previously set by this module was deprecated in release 2.1.0 and since release
+ 4.0.0 is no longer set.
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
EXAMPLES = r"""
# basic pre_task and post_task example
@@ -83,22 +82,23 @@ post_tasks:
delegate_to: localhost
"""
-RETURN = '''
+RETURN = r"""
updated_elbs:
description: A list of ELB names that the instance has been added to or removed from.
returned: always
type: list
elements: str
-'''
+"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class ElbManager:
@@ -107,9 +107,9 @@ class ElbManager:
def __init__(self, module, instance_id=None, ec2_elbs=None):
retry_decorator = AWSRetry.jittered_backoff()
self.module = module
- self.client_asg = module.client('autoscaling', retry_decorator=retry_decorator)
- self.client_ec2 = module.client('ec2', retry_decorator=retry_decorator)
- self.client_elb = module.client('elb', retry_decorator=retry_decorator)
+ self.client_asg = module.client("autoscaling", retry_decorator=retry_decorator)
+ self.client_ec2 = module.client("ec2", retry_decorator=retry_decorator)
+ self.client_elb = module.client("elb", retry_decorator=retry_decorator)
self.instance_id = instance_id
self.lbs = self._get_instance_lbs(ec2_elbs)
self.changed = False
@@ -120,11 +120,11 @@ class ElbManager:
to report it out-of-service"""
for lb in self.lbs:
- instance_ids = [i['InstanceId'] for i in lb['Instances']]
+ instance_ids = [i["InstanceId"] for i in lb["Instances"]]
if self.instance_id not in instance_ids:
continue
- self.updated_elbs.add(lb['LoadBalancerName'])
+ self.updated_elbs.add(lb["LoadBalancerName"])
if self.module.check_mode:
self.changed = True
@@ -133,12 +133,13 @@ class ElbManager:
try:
self.client_elb.deregister_instances_from_load_balancer(
aws_retry=True,
- LoadBalancerName=lb['LoadBalancerName'],
+ LoadBalancerName=lb["LoadBalancerName"],
Instances=[{"InstanceId": self.instance_id}],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, 'Failed to deregister instance from load balancer',
- load_balancer=lb, instance=self.instance_id)
+ self.module.fail_json_aws(
+ e, "Failed to deregister instance from load balancer", load_balancer=lb, instance=self.instance_id
+ )
# The ELB is changing state in some way. Either an instance that's
# InService is moving to OutOfService, or an instance that's
@@ -147,17 +148,17 @@ class ElbManager:
if wait:
for lb in self.lbs:
- self._await_elb_instance_state(lb, 'Deregistered', timeout)
+ self._await_elb_instance_state(lb, "Deregistered", timeout)
def register(self, wait, enable_availability_zone, timeout):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
- instance_ids = [i['InstanceId'] for i in lb['Instances']]
+ instance_ids = [i["InstanceId"] for i in lb["Instances"]]
if self.instance_id in instance_ids:
continue
- self.updated_elbs.add(lb['LoadBalancerName'])
+ self.updated_elbs.add(lb["LoadBalancerName"])
if enable_availability_zone:
self.changed |= self._enable_availailability_zone(lb)
@@ -169,31 +170,32 @@ class ElbManager:
try:
self.client_elb.register_instances_with_load_balancer(
aws_retry=True,
- LoadBalancerName=lb['LoadBalancerName'],
+ LoadBalancerName=lb["LoadBalancerName"],
Instances=[{"InstanceId": self.instance_id}],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, 'Failed to register instance with load balancer',
- load_balancer=lb, instance=self.instance_id)
+ self.module.fail_json_aws(
+ e, "Failed to register instance with load balancer", load_balancer=lb, instance=self.instance_id
+ )
self.changed = True
if wait:
for lb in self.lbs:
- self._await_elb_instance_state(lb, 'InService', timeout)
+ self._await_elb_instance_state(lb, "InService", timeout)
@AWSRetry.jittered_backoff()
def _describe_elbs(self, **params):
- paginator = self.client_elb.get_paginator('describe_load_balancers')
+ paginator = self.client_elb.get_paginator("describe_load_balancers")
results = paginator.paginate(**params).build_full_result()
- return results['LoadBalancerDescriptions']
+ return results["LoadBalancerDescriptions"]
def exists(self, lbtest):
- """ Verify that the named ELB actually exists """
+ """Verify that the named ELB actually exists"""
found = False
for lb in self.lbs:
- if lb['LoadBalancerName'] == lbtest:
+ if lb["LoadBalancerName"] == lbtest:
found = True
break
return found
@@ -203,9 +205,9 @@ class ElbManager:
Returns True if the zone was enabled or False if no change was made.
lb: load balancer"""
instance = self._get_instance()
- desired_zone = instance['Placement']['AvailabilityZone']
+ desired_zone = instance["Placement"]["AvailabilityZone"]
- if desired_zone in lb['AvailabilityZones']:
+ if desired_zone in lb["AvailabilityZones"]:
return False
if self.module.check_mode:
@@ -214,12 +216,11 @@ class ElbManager:
try:
self.client_elb.enable_availability_zones_for_load_balancer(
aws_retry=True,
- LoadBalancerName=lb['LoadBalancerName'],
+ LoadBalancerName=lb["LoadBalancerName"],
AvailabilityZones=[desired_zone],
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, 'Failed to enable AZ on load balancers',
- load_balancer=lb, zone=desired_zone)
+ self.module.fail_json_aws(e, "Failed to enable AZ on load balancers", load_balancer=lb, zone=desired_zone)
return True
@@ -233,27 +234,29 @@ class ElbManager:
if awaited_state == initial_state:
return
- if awaited_state == 'InService':
- waiter = self.client_elb.get_waiter('instance_in_service')
- elif awaited_state == 'Deregistered':
- waiter = self.client_elb.get_waiter('instance_deregistered')
- elif awaited_state == 'OutOfService':
- waiter = self.client_elb.get_waiter('instance_deregistered')
+ if awaited_state == "InService":
+ waiter = self.client_elb.get_waiter("instance_in_service")
+ elif awaited_state == "Deregistered":
+ waiter = self.client_elb.get_waiter("instance_deregistered")
+ elif awaited_state == "OutOfService":
+ waiter = self.client_elb.get_waiter("instance_deregistered")
else:
- self.module.fail_json(msg='Could not wait for unknown state', awaited_state=awaited_state)
+ self.module.fail_json(msg="Could not wait for unknown state", awaited_state=awaited_state)
try:
waiter.wait(
- LoadBalancerName=lb['LoadBalancerName'],
+ LoadBalancerName=lb["LoadBalancerName"],
Instances=[{"InstanceId": self.instance_id}],
- WaiterConfig={'Delay': 1, 'MaxAttempts': timeout},
+ WaiterConfig={"Delay": 1, "MaxAttempts": timeout},
)
except botocore.exceptions.WaiterError as e:
- self.module.fail_json_aws(e, msg='Timeout waiting for instance to reach desired state',
- awaited_state=awaited_state)
+ self.module.fail_json_aws(
+ e, msg="Timeout waiting for instance to reach desired state", awaited_state=awaited_state
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Error while waiting for instance to reach desired state',
- awaited_state=awaited_state)
+ self.module.fail_json_aws(
+ e, msg="Error while waiting for instance to reach desired state", awaited_state=awaited_state
+ )
return
@@ -265,18 +268,21 @@ class ElbManager:
try:
status = self.client_elb.describe_instance_health(
aws_retry=True,
- LoadBalancerName=lb['LoadBalancerName'],
- Instances=[{'InstanceId': self.instance_id}],
- )['InstanceStates']
- except is_boto3_error_code('InvalidInstance'):
+ LoadBalancerName=lb["LoadBalancerName"],
+ Instances=[{"InstanceId": self.instance_id}],
+ )["InstanceStates"]
+ except is_boto3_error_code("InvalidInstance"):
return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- self.module.fail_json_aws(e, msg='Failed to get instance health')
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Failed to get instance health")
if not status:
return None
- return status[0]['State']
+ return status[0]["State"]
def _get_instance_lbs(self, ec2_elbs=None):
"""Returns a list of ELBs attached to self.instance_id
@@ -289,12 +295,12 @@ class ElbManager:
ec2_elbs = self._get_auto_scaling_group_lbs()
if ec2_elbs:
- list_params['LoadBalancerNames'] = ec2_elbs
+ list_params["LoadBalancerNames"] = ec2_elbs
try:
elbs = self._describe_elbs(**list_params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, 'Failed to describe load balancers')
+ self.module.fail_json_aws(e, "Failed to describe load balancers")
if ec2_elbs:
return elbs
@@ -303,7 +309,7 @@ class ElbManager:
# of.
lbs = []
for lb in elbs:
- instance_ids = [i['InstanceId'] for i in lb['Instances']]
+ instance_ids = [i["InstanceId"] for i in lb["Instances"]]
if self.instance_id in instance_ids:
lbs.append(lb)
@@ -311,14 +317,14 @@ class ElbManager:
def _get_auto_scaling_group_lbs(self):
"""Returns a list of ELBs associated with self.instance_id
- indirectly through its auto scaling group membership"""
+ indirectly through its auto scaling group membership"""
try:
asg_instances = self.client_asg.describe_auto_scaling_instances(
- aws_retry=True,
- InstanceIds=[self.instance_id])['AutoScalingInstances']
+ aws_retry=True, InstanceIds=[self.instance_id]
+ )["AutoScalingInstances"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Failed to describe ASG Instance')
+ self.module.fail_json_aws(e, msg="Failed to describe ASG Instance")
if len(asg_instances) > 1:
self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
@@ -327,42 +333,40 @@ class ElbManager:
# Instance isn't a member of an ASG
return []
- asg_name = asg_instances[0]['AutoScalingGroupName']
+ asg_name = asg_instances[0]["AutoScalingGroupName"]
try:
asg_instances = self.client_asg.describe_auto_scaling_groups(
- aws_retry=True,
- AutoScalingGroupNames=[asg_name])['AutoScalingGroups']
+ aws_retry=True, AutoScalingGroupNames=[asg_name]
+ )["AutoScalingGroups"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Failed to describe ASG Instance')
+ self.module.fail_json_aws(e, msg="Failed to describe ASG Instance")
if len(asg_instances) != 1:
self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
- return asg_instances[0]['LoadBalancerNames']
+ return asg_instances[0]["LoadBalancerNames"]
def _get_instance(self):
"""Returns the description of an instance"""
try:
- result = self.client_ec2.describe_instances(
- aws_retry=True,
- InstanceIds=[self.instance_id])
+ result = self.client_ec2.describe_instances(aws_retry=True, InstanceIds=[self.instance_id])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Failed to describe ASG Instance')
- return result['Reservations'][0]['Instances'][0]
+ self.module.fail_json_aws(e, msg="Failed to describe ASG Instance")
+ return result["Reservations"][0]["Instances"][0]
def main():
argument_spec = dict(
- state={'required': True, 'choices': ['present', 'absent']},
- instance_id={'required': True},
- ec2_elbs={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
- enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
- wait={'required': False, 'default': True, 'type': 'bool'},
- wait_timeout={'required': False, 'default': 0, 'type': 'int'},
+ state={"required": True, "choices": ["present", "absent"]},
+ instance_id={"required": True},
+ ec2_elbs={"default": None, "required": False, "type": "list", "elements": "str"},
+ enable_availability_zone={"default": True, "required": False, "type": "bool"},
+ wait={"required": False, "default": True, "type": "bool"},
+ wait_timeout={"required": False, "default": 0, "type": "int"},
)
required_if = [
- ('state', 'present', ['ec2_elbs']),
+ ("state", "present", ["ec2_elbs"]),
]
module = AnsibleAWSModule(
@@ -371,22 +375,22 @@ def main():
supports_check_mode=True,
)
- ec2_elbs = module.params['ec2_elbs']
- wait = module.params['wait']
- enable_availability_zone = module.params['enable_availability_zone']
- timeout = module.params['wait_timeout']
- instance_id = module.params['instance_id']
+ ec2_elbs = module.params["ec2_elbs"]
+ wait = module.params["wait"]
+ enable_availability_zone = module.params["enable_availability_zone"]
+ timeout = module.params["wait_timeout"]
+ instance_id = module.params["instance_id"]
elb_man = ElbManager(module, instance_id, ec2_elbs)
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
- module.fail_json(msg="ELB {0} does not exist".format(elb))
+ module.fail_json(msg=f"ELB {elb} does not exist")
- if module.params['state'] == 'present':
+ if module.params["state"] == "present":
elb_man.register(wait, enable_availability_zone, timeout)
- elif module.params['state'] == 'absent':
+ elif module.params["state"] == "absent":
elb_man.deregister(wait, timeout)
module.exit_json(
@@ -395,5 +399,5 @@ def main():
)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
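_await_elb_instance_state() above maps the awaited state onto botocore's built-in classic-ELB waiters ("instance_in_service" and "instance_deregistered") and passes the module's timeout through WaiterConfig. A minimal sketch of the same call outside the module (load balancer and instance names are illustrative placeholders):

import boto3

def wait_until_in_service(elb_client, lb_name, instance_id, max_attempts=300):
    # Delay=1 mirrors the module's WaiterConfig: the waiter polls
    # describe_instance_health once per second until InService or timeout.
    waiter = elb_client.get_waiter("instance_in_service")
    waiter.wait(
        LoadBalancerName=lb_name,
        Instances=[{"InstanceId": instance_id}],
        WaiterConfig={"Delay": 1, "MaxAttempts": max_attempts},
    )

# wait_until_in_service(boto3.client("elb"), "frontend-prod-elb", "i-0123456789abcdef0")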
diff --git a/ansible_collections/community/aws/plugins/modules/elb_network_lb.py b/ansible_collections/community/aws/plugins/modules/elb_network_lb.py
index 6dcdfd209..86d8f0872 100644
--- a/ansible_collections/community/aws/plugins/modules/elb_network_lb.py
+++ b/ansible_collections/community/aws/plugins/modules/elb_network_lb.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Rob White (@wimnat)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: elb_network_lb
version_added: 1.0.0
@@ -126,17 +123,17 @@ options:
- Sets the type of IP addresses used by the subnets of the specified Application Load Balancer.
choices: [ 'ipv4', 'dualstack' ]
type: str
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
- - amazon.aws.tags
notes:
- Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
- Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create an ELB and attach a listener
@@ -186,10 +183,9 @@ EXAMPLES = r'''
community.aws.elb_network_lb:
name: myelb
state: absent
+"""
-'''
-
-RETURN = r'''
+RETURN = r"""
load_balancer:
description: A representation of the Network Load Balancer
returned: when state is present
@@ -328,11 +324,17 @@ load_balancer:
returned: when state is present
type: str
sample: vpc-0011223344
-'''
+"""
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListener
+from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListeners
+from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import NetworkLoadBalancer
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def create_or_update_elb(elb_obj):
@@ -346,10 +348,12 @@ def create_or_update_elb(elb_obj):
# Tags - only need to play with tags if tags parameter has been set to something
if elb_obj.tags is not None:
-
# Delete necessary tags
- tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']),
- boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags)
+ tags_need_modify, tags_to_delete = compare_aws_tags(
+ boto3_tag_list_to_ansible_dict(elb_obj.elb["tags"]),
+ boto3_tag_list_to_ansible_dict(elb_obj.tags),
+ elb_obj.purge_tags,
+ )
if tags_to_delete:
elb_obj.delete_tags(tags_to_delete)
@@ -366,25 +370,29 @@ def create_or_update_elb(elb_obj):
elb_obj.modify_elb_attributes()
# Listeners
- listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'])
+ listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb["LoadBalancerArn"])
listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()
# Delete listeners
for listener_to_delete in listeners_to_delete:
- listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn'])
+ listener_obj = ELBListener(
+ elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb["LoadBalancerArn"]
+ )
listener_obj.delete()
listeners_obj.changed = True
# Add listeners
for listener_to_add in listeners_to_add:
- listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn'])
+ listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb["LoadBalancerArn"])
listener_obj.add()
listeners_obj.changed = True
# Modify listeners
for listener_to_modify in listeners_to_modify:
- listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn'])
+ listener_obj = ELBListener(
+ elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb["LoadBalancerArn"]
+ )
listener_obj.modify()
listeners_obj.changed = True
@@ -393,8 +401,8 @@ def create_or_update_elb(elb_obj):
elb_obj.changed = True
# Update ELB ip address type only if option has been provided
- if elb_obj.module.params.get('ip_address_type') is not None:
- elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type'))
+ if elb_obj.module.params.get("ip_address_type") is not None:
+ elb_obj.modify_ip_address_type(elb_obj.module.params.get("ip_address_type"))
# Update the objects to pickup changes
# Get the ELB again
@@ -407,24 +415,20 @@ def create_or_update_elb(elb_obj):
# Convert to snake_case and merge in everything we want to return to the user
snaked_elb = camel_dict_to_snake_dict(elb_obj.elb)
snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes))
- snaked_elb['listeners'] = []
+ snaked_elb["listeners"] = []
for listener in listeners_obj.current_listeners:
- snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener))
+ snaked_elb["listeners"].append(camel_dict_to_snake_dict(listener))
# Change tags to ansible friendly dict
- snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags'])
+ snaked_elb["tags"] = boto3_tag_list_to_ansible_dict(snaked_elb["tags"])
# ip address type
- snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type()
+ snaked_elb["ip_address_type"] = elb_obj.get_elb_ip_address_type()
- elb_obj.module.exit_json(
- changed=elb_obj.changed,
- load_balancer=snaked_elb,
- **snaked_elb)
+ elb_obj.module.exit_json(changed=elb_obj.changed, load_balancer=snaked_elb, **snaked_elb)
def delete_elb(elb_obj):
-
if elb_obj.elb:
elb_obj.delete()
@@ -432,42 +436,42 @@ def delete_elb(elb_obj):
def main():
-
- argument_spec = (
- dict(
- cross_zone_load_balancing=dict(type='bool'),
- deletion_protection=dict(type='bool'),
- listeners=dict(type='list',
- elements='dict',
- options=dict(
- Protocol=dict(type='str', required=True),
- Port=dict(type='int', required=True),
- SslPolicy=dict(type='str'),
- Certificates=dict(type='list', elements='dict'),
- DefaultActions=dict(type='list', required=True, elements='dict')
- )
- ),
- name=dict(required=True, type='str'),
- purge_listeners=dict(default=True, type='bool'),
- purge_tags=dict(default=True, type='bool'),
- subnets=dict(type='list', elements='str'),
- subnet_mappings=dict(type='list', elements='dict'),
- scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
- state=dict(choices=['present', 'absent'], type='str', default='present'),
- tags=dict(type='dict', aliases=['resource_tags']),
- wait_timeout=dict(type='int'),
- wait=dict(type='bool'),
- ip_address_type=dict(type='str', choices=['ipv4', 'dualstack'])
- )
+ argument_spec = dict(
+ cross_zone_load_balancing=dict(type="bool"),
+ deletion_protection=dict(type="bool"),
+ listeners=dict(
+ type="list",
+ elements="dict",
+ options=dict(
+ Protocol=dict(type="str", required=True),
+ Port=dict(type="int", required=True),
+ SslPolicy=dict(type="str"),
+ Certificates=dict(type="list", elements="dict"),
+ DefaultActions=dict(type="list", required=True, elements="dict"),
+ ),
+ ),
+ name=dict(required=True, type="str"),
+ purge_listeners=dict(default=True, type="bool"),
+ purge_tags=dict(default=True, type="bool"),
+ subnets=dict(type="list", elements="str"),
+ subnet_mappings=dict(type="list", elements="dict"),
+ scheme=dict(default="internet-facing", choices=["internet-facing", "internal"]),
+ state=dict(choices=["present", "absent"], type="str", default="present"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ wait_timeout=dict(type="int"),
+ wait=dict(type="bool"),
+ ip_address_type=dict(type="str", choices=["ipv4", "dualstack"]),
)
required_if = [
- ('state', 'present', ('subnets', 'subnet_mappings',), True)
+ ["state", "present", ["subnets", "subnet_mappings"], True],
]
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=required_if,
- mutually_exclusive=[['subnets', 'subnet_mappings']])
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ mutually_exclusive=[["subnets", "subnet_mappings"]],
+ )
# Check for subnets or subnet_mappings if state is present
state = module.params.get("state")
@@ -477,20 +481,20 @@ def main():
if listeners is not None:
for listener in listeners:
for key in listener.keys():
- protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP']
- if key == 'Protocol' and listener[key] not in protocols_list:
+ protocols_list = ["TCP", "TLS", "UDP", "TCP_UDP"]
+ if key == "Protocol" and listener[key] not in protocols_list:
module.fail_json(msg="'Protocol' must be either " + ", ".join(protocols_list))
- connection = module.client('elbv2')
- connection_ec2 = module.client('ec2')
+ connection = module.client("elbv2")
+ connection_ec2 = module.client("ec2")
elb = NetworkLoadBalancer(connection, connection_ec2, module)
- if state == 'present':
+ if state == "present":
create_or_update_elb(elb)
else:
delete_elb(elb)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
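The reflowed compare_aws_tags() call above performs a three-way tag reconciliation: given the current tags, the desired tags, and a purge flag, it yields the tags to (re)set and the tag keys to delete. An approximation of that contract on plain dicts, for reference:

def diff_tags(current, desired, purge=True):
    # Tags whose value is new or changed must be set; with purge enabled,
    # keys present on the resource but absent from `desired` are deleted.
    to_set = {k: v for k, v in desired.items() if current.get(k) != v}
    to_delete = [k for k in current if k not in desired] if purge else []
    return to_set, to_delete

# diff_tags({"env": "dev", "team": "a"}, {"env": "prod"})
# -> ({"env": "prod"}, ["team"])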
diff --git a/ansible_collections/community/aws/plugins/modules/elb_target.py b/ansible_collections/community/aws/plugins/modules/elb_target.py
index cff46a62a..22074d496 100644
--- a/ansible_collections/community/aws/plugins/modules/elb_target.py
+++ b/ansible_collections/community/aws/plugins/modules/elb_target.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: elb_target
version_added: 1.0.0
short_description: Manage a target in a target group
description:
- - Used to register or deregister a target in a target group.
-author: "Rob White (@wimnat)"
+ - Used to register or deregister a target in a target group.
+author:
+ - "Rob White (@wimnat)"
options:
deregister_unused:
description:
@@ -68,16 +67,17 @@ options:
required: true
choices: [ 'present', 'absent' ]
type: str
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
notes:
- If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it.
-'''
-EXAMPLES = '''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Register an IP address target to a target group
@@ -105,14 +105,13 @@ EXAMPLES = '''
target_id: i-1234567
target_port: 8080
state: present
+"""
-'''
-
-RETURN = '''
-
-'''
+RETURN = r"""
+"""
-from time import time, sleep
+from time import sleep
+from time import time
try:
import botocore
@@ -121,28 +120,28 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound'])
+
+@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=["TargetGroupNotFound"])
def describe_target_groups_with_backoff(connection, tg_name):
return connection.describe_target_groups(Names=[tg_name])
def convert_tg_name_to_arn(connection, module, tg_name):
-
try:
response = describe_target_groups_with_backoff(connection, tg_name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to describe target group {0}".format(tg_name))
+ module.fail_json_aws(e, msg=f"Unable to describe target group {tg_name}")
- tg_arn = response['TargetGroups'][0]['TargetGroupArn']
+ tg_arn = response["TargetGroups"][0]["TargetGroupArn"]
return tg_arn
-@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound'])
+@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=["TargetGroupNotFound"])
def describe_targets_with_backoff(connection, tg_arn, target):
if target is None:
tg = []
@@ -153,7 +152,6 @@ def describe_targets_with_backoff(connection, tg_arn, target):
def describe_targets(connection, module, tg_arn, target=None):
-
"""
Describe targets in a target group
@@ -165,12 +163,12 @@ def describe_targets(connection, module, tg_arn, target=None):
"""
try:
- targets = describe_targets_with_backoff(connection, tg_arn, target)['TargetHealthDescriptions']
+ targets = describe_targets_with_backoff(connection, tg_arn, target)["TargetHealthDescriptions"]
if not targets:
return {}
return targets[0]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to describe target health for target {0}".format(target))
+ module.fail_json_aws(e, msg=f"Unable to describe target health for target {target}")
@AWSRetry.jittered_backoff(retries=10, delay=10)
@@ -179,7 +177,6 @@ def register_target_with_backoff(connection, target_group_arn, target):
def register_target(connection, module):
-
"""
Registers a target to a target group
@@ -201,26 +198,32 @@ def register_target(connection, module):
target = dict(Id=target_id)
if target_az:
- target['AvailabilityZone'] = target_az
+ target["AvailabilityZone"] = target_az
if target_port:
- target['Port'] = target_port
+ target["Port"] = target_port
target_description = describe_targets(connection, module, target_group_arn, target)
- if 'Reason' in target_description['TargetHealth']:
- if target_description['TargetHealth']['Reason'] == "Target.NotRegistered":
+ if "Reason" in target_description["TargetHealth"]:
+ if target_description["TargetHealth"]["Reason"] == "Target.NotRegistered":
try:
register_target_with_backoff(connection, target_group_arn, target)
changed = True
if target_status:
- target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
+ target_status_check(
+ connection, module, target_group_arn, target, target_status, target_status_timeout
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to deregister target {0}".format(target))
+ module.fail_json_aws(e, msg=f"Unable to deregister target {target}")
# Get all targets for the target group
target_descriptions = describe_targets(connection, module, target_group_arn)
- module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
+ module.exit_json(
+ changed=changed,
+ target_health_descriptions=camel_dict_to_snake_dict(target_descriptions),
+ target_group_arn=target_group_arn,
+ )
@AWSRetry.jittered_backoff(retries=10, delay=10)
@@ -229,7 +232,6 @@ def deregister_target_with_backoff(connection, target_group_arn, target):
def deregister_target(connection, module):
-
"""
Deregisters a target to a target group
@@ -251,18 +253,18 @@ def deregister_target(connection, module):
target = dict(Id=target_id)
if target_port:
- target['Port'] = target_port
+ target["Port"] = target_port
target_description = describe_targets(connection, module, target_group_arn, target)
- current_target_state = target_description['TargetHealth']['State']
- current_target_reason = target_description['TargetHealth'].get('Reason')
+ current_target_state = target_description["TargetHealth"]["State"]
+ current_target_reason = target_description["TargetHealth"].get("Reason")
needs_deregister = False
- if deregister_unused and current_target_state == 'unused':
- if current_target_reason != 'Target.NotRegistered':
+ if deregister_unused and current_target_state == "unused":
+ if current_target_reason != "Target.NotRegistered":
needs_deregister = True
- elif current_target_state not in ['unused', 'draining']:
+ elif current_target_state not in ["unused", "draining"]:
needs_deregister = True
if needs_deregister:
@@ -270,11 +272,13 @@ def deregister_target(connection, module):
deregister_target_with_backoff(connection, target_group_arn, target)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Unable to deregister target {0}".format(target))
+ module.fail_json(msg=f"Unable to deregister target {target}")
else:
- if current_target_reason != 'Target.NotRegistered' and current_target_state != 'draining':
- module.warn(warning="Your specified target has an 'unused' state but is still registered to the target group. " +
- "To force deregistration use the 'deregister_unused' option.")
+ if current_target_reason != "Target.NotRegistered" and current_target_state != "draining":
+ module.warn(
+ warning="Your specified target has an 'unused' state but is still registered to the target group. "
+ + "To force deregistration use the 'deregister_unused' option."
+ )
if target_status:
target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
@@ -282,53 +286,60 @@ def deregister_target(connection, module):
# Get all targets for the target group
target_descriptions = describe_targets(connection, module, target_group_arn)
- module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
+ module.exit_json(
+ changed=changed,
+ target_health_descriptions=camel_dict_to_snake_dict(target_descriptions),
+ target_group_arn=target_group_arn,
+ )
def target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout):
reached_state = False
timeout = target_status_timeout + time()
while time() < timeout:
- health_state = describe_targets(connection, module, target_group_arn, target)['TargetHealth']['State']
+ health_state = describe_targets(connection, module, target_group_arn, target)["TargetHealth"]["State"]
if health_state == target_status:
reached_state = True
break
sleep(1)
if not reached_state:
- module.fail_json(msg='Status check timeout of {0} exceeded, last status was {1}: '.format(target_status_timeout, health_state))
+ module.fail_json(
+ msg=f"Status check timeout of {target_status_timeout} exceeded, last status was {health_state}: "
+ )
def main():
-
argument_spec = dict(
- deregister_unused=dict(type='bool', default=False),
- target_az=dict(type='str'),
- target_group_arn=dict(type='str'),
- target_group_name=dict(type='str'),
- target_id=dict(type='str', required=True),
- target_port=dict(type='int'),
- target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'),
- target_status_timeout=dict(type='int', default=60),
- state=dict(required=True, choices=['present', 'absent'], type='str'),
+ deregister_unused=dict(type="bool", default=False),
+ target_az=dict(type="str"),
+ target_group_arn=dict(type="str"),
+ target_group_name=dict(type="str"),
+ target_id=dict(type="str", required=True),
+ target_port=dict(type="int"),
+ target_status=dict(
+ choices=["initial", "healthy", "unhealthy", "unused", "draining", "unavailable"], type="str"
+ ),
+ target_status_timeout=dict(type="int", default=60),
+ state=dict(required=True, choices=["present", "absent"], type="str"),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- mutually_exclusive=[['target_group_arn', 'target_group_name']],
+ mutually_exclusive=[["target_group_arn", "target_group_name"]],
)
try:
- connection = module.client('elbv2')
+ connection = module.client("elbv2")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
state = module.params.get("state")
- if state == 'present':
+ if state == "present":
register_target(connection, module)
else:
deregister_target(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
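target_status_check() above is a plain poll-until loop: compute a deadline, re-describe the target's health once per second, and fail if the wanted state never appears. A self-contained sketch of the same loop against the elbv2 API (the module version goes through its retry-wrapped describe_targets() helper instead):

from time import sleep, time

def wait_for_target_status(elbv2_client, tg_arn, target, wanted, timeout):
    # Poll describe_target_health until the target (a dict like
    # {"Id": "i-..."}) reaches `wanted`; False if the deadline passes first.
    deadline = time() + timeout
    while time() < deadline:
        health = elbv2_client.describe_target_health(
            TargetGroupArn=tg_arn, Targets=[target]
        )["TargetHealthDescriptions"][0]["TargetHealth"]["State"]
        if health == wanted:
            return True
        sleep(1)
    return False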
diff --git a/ansible_collections/community/aws/plugins/modules/elb_target_group.py b/ansible_collections/community/aws/plugins/modules/elb_target_group.py
index 45a6e7ae9..71a859ead 100644
--- a/ansible_collections/community/aws/plugins/modules/elb_target_group.py
+++ b/ansible_collections/community/aws/plugins/modules/elb_target_group.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: elb_target_group
version_added: 1.0.0
@@ -204,17 +202,18 @@ options:
- The time to wait for the target group.
default: 200
type: int
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
- - amazon.aws.tags
notes:
- Once a target group has been created, only its health check can then be modified using subsequent calls
-'''
-EXAMPLES = r'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create a target group with a default health check
@@ -271,7 +270,7 @@ EXAMPLES = r'''
Port: 80
state: present
wait_timeout: 200
- wait: True
+ wait: true
- name: Create a target group with IP address targets
community.aws.elb_target_group:
@@ -291,7 +290,7 @@ EXAMPLES = r'''
Port: 80
state: present
wait_timeout: 200
- wait: True
+ wait: true
# Using lambda as targets require that the target group
# itself is allow to invoke the lambda function.
@@ -304,7 +303,7 @@ EXAMPLES = r'''
name: my-lambda-targetgroup
target_type: lambda
state: present
- modify_targets: False
+ modify_targets: false
register: out
- name: second, allow invoke of the lambda
@@ -322,11 +321,10 @@ EXAMPLES = r'''
target_type: lambda
state: present
targets:
- - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function
-
-'''
+ - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function
+"""
-RETURN = r'''
+RETURN = r"""
deregistration_delay_timeout_seconds:
description: The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
returned: when state present
@@ -437,7 +435,7 @@ vpc_id:
returned: when state present
type: str
sample: vpc-0123456
-'''
+"""
import time
@@ -448,56 +446,64 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_tg_attributes(connection, module, tg_arn):
try:
_attributes = connection.describe_target_group_attributes(TargetGroupArn=tg_arn, aws_retry=True)
- tg_attributes = boto3_tag_list_to_ansible_dict(_attributes['Attributes'])
+ tg_attributes = boto3_tag_list_to_ansible_dict(_attributes["Attributes"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't get target group attributes")
# Replace '.' with '_' in attribute key names to make it more Ansible friendly
- return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items())
+ return dict((k.replace(".", "_"), v) for k, v in tg_attributes.items())
def get_target_group_tags(connection, module, target_group_arn):
try:
_tags = connection.describe_tags(ResourceArns=[target_group_arn], aws_retry=True)
- return _tags['TagDescriptions'][0]['Tags']
+ return _tags["TagDescriptions"][0]["Tags"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't get target group tags")
def get_target_group(connection, module, retry_missing=False):
- extra_codes = ['TargetGroupNotFound'] if retry_missing else []
+ extra_codes = ["TargetGroupNotFound"] if retry_missing else []
try:
- target_group_paginator = connection.get_paginator('describe_target_groups').paginate(Names=[module.params.get("name")])
+ target_group_paginator = connection.get_paginator("describe_target_groups").paginate(
+ Names=[module.params.get("name")]
+ )
jittered_retry = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=extra_codes)
result = jittered_retry(target_group_paginator.build_full_result)()
- except is_boto3_error_code('TargetGroupNotFound'):
+ except is_boto3_error_code("TargetGroupNotFound"):
return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Couldn't get target group")
- return result['TargetGroups'][0]
+ return result["TargetGroups"][0]
def wait_for_status(connection, module, target_group_arn, targets, status):
polling_increment_secs = 5
- max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
+ max_retries = module.params.get("wait_timeout") // polling_increment_secs
status_achieved = False
for x in range(0, max_retries):
try:
- response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True)
- if response['TargetHealthDescriptions'][0]['TargetHealth']['State'] == status:
+ response = connection.describe_target_health(
+ TargetGroupArn=target_group_arn, Targets=targets, aws_retry=True
+ )
+ if response["TargetHealthDescriptions"][0]["TargetHealth"]["State"] == status:
status_achieved = True
break
else:
@@ -527,156 +533,186 @@ def create_or_update_attributes(connection, module, target_group, new_target_gro
update_attributes = []
# Get current attributes
- current_tg_attributes = get_tg_attributes(connection, module, target_group['TargetGroupArn'])
+ current_tg_attributes = get_tg_attributes(connection, module, target_group["TargetGroupArn"])
if deregistration_delay_timeout is not None:
- if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']:
- update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)})
+ if str(deregistration_delay_timeout) != current_tg_attributes["deregistration_delay_timeout_seconds"]:
+ update_attributes.append(
+ {"Key": "deregistration_delay.timeout_seconds", "Value": str(deregistration_delay_timeout)}
+ )
if deregistration_connection_termination is not None:
- if deregistration_connection_termination and current_tg_attributes.get('deregistration_delay_connection_termination_enabled') != "true":
- update_attributes.append({'Key': 'deregistration_delay.connection_termination.enabled', 'Value': 'true'})
+ if (
+ deregistration_connection_termination
+ and current_tg_attributes.get("deregistration_delay_connection_termination_enabled") != "true"
+ ):
+ update_attributes.append({"Key": "deregistration_delay.connection_termination.enabled", "Value": "true"})
if stickiness_enabled is not None:
- if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true":
- update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'})
+ if stickiness_enabled and current_tg_attributes["stickiness_enabled"] != "true":
+ update_attributes.append({"Key": "stickiness.enabled", "Value": "true"})
if stickiness_lb_cookie_duration is not None:
- if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']:
- update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)})
+ if str(stickiness_lb_cookie_duration) != current_tg_attributes["stickiness_lb_cookie_duration_seconds"]:
+ update_attributes.append(
+ {"Key": "stickiness.lb_cookie.duration_seconds", "Value": str(stickiness_lb_cookie_duration)}
+ )
if stickiness_type is not None:
- if stickiness_type != current_tg_attributes.get('stickiness_type'):
- update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type})
+ if stickiness_type != current_tg_attributes.get("stickiness_type"):
+ update_attributes.append({"Key": "stickiness.type", "Value": stickiness_type})
if stickiness_app_cookie_name is not None:
- if stickiness_app_cookie_name != current_tg_attributes.get('stickiness_app_cookie_name'):
- update_attributes.append({'Key': 'stickiness.app_cookie.cookie_name', 'Value': str(stickiness_app_cookie_name)})
+ if stickiness_app_cookie_name != current_tg_attributes.get("stickiness_app_cookie_name"):
+ update_attributes.append(
+ {"Key": "stickiness.app_cookie.cookie_name", "Value": str(stickiness_app_cookie_name)}
+ )
if stickiness_app_cookie_duration is not None:
- if str(stickiness_app_cookie_duration) != current_tg_attributes['stickiness_app_cookie_duration_seconds']:
- update_attributes.append({'Key': 'stickiness.app_cookie.duration_seconds', 'Value': str(stickiness_app_cookie_duration)})
+ if str(stickiness_app_cookie_duration) != current_tg_attributes["stickiness_app_cookie_duration_seconds"]:
+ update_attributes.append(
+ {"Key": "stickiness.app_cookie.duration_seconds", "Value": str(stickiness_app_cookie_duration)}
+ )
if preserve_client_ip_enabled is not None:
- if target_type not in ('udp', 'tcp_udp'):
- if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get('preserve_client_ip_enabled'):
- update_attributes.append({'Key': 'preserve_client_ip.enabled', 'Value': str(preserve_client_ip_enabled).lower()})
+ if target_type not in ("udp", "tcp_udp"):
+ if str(preserve_client_ip_enabled).lower() != current_tg_attributes.get("preserve_client_ip_enabled"):
+ update_attributes.append(
+ {"Key": "preserve_client_ip.enabled", "Value": str(preserve_client_ip_enabled).lower()}
+ )
if proxy_protocol_v2_enabled is not None:
- if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get('proxy_protocol_v2_enabled'):
- update_attributes.append({'Key': 'proxy_protocol_v2.enabled', 'Value': str(proxy_protocol_v2_enabled).lower()})
+ if str(proxy_protocol_v2_enabled).lower() != current_tg_attributes.get("proxy_protocol_v2_enabled"):
+ update_attributes.append(
+ {"Key": "proxy_protocol_v2.enabled", "Value": str(proxy_protocol_v2_enabled).lower()}
+ )
if load_balancing_algorithm_type is not None:
- if str(load_balancing_algorithm_type) != current_tg_attributes['load_balancing_algorithm_type']:
- update_attributes.append({'Key': 'load_balancing.algorithm.type', 'Value': str(load_balancing_algorithm_type)})
+ if str(load_balancing_algorithm_type) != current_tg_attributes["load_balancing_algorithm_type"]:
+ update_attributes.append(
+ {"Key": "load_balancing.algorithm.type", "Value": str(load_balancing_algorithm_type)}
+ )
if update_attributes:
try:
- connection.modify_target_group_attributes(TargetGroupArn=target_group['TargetGroupArn'], Attributes=update_attributes, aws_retry=True)
+ connection.modify_target_group_attributes(
+ TargetGroupArn=target_group["TargetGroupArn"], Attributes=update_attributes, aws_retry=True
+ )
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
# Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state
if new_target_group:
- connection.delete_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True)
+ connection.delete_target_group(TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True)
            module.fail_json_aws(e, msg="Couldn't set target group attributes")
return changed
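# Note (illustrative sketch, not part of the patch): every check above follows one
# pattern -- compare a desired attribute against the flattened current value and
# queue a {'Key': ..., 'Value': ...} pair only when it differs, so the API call is
# skipped entirely when nothing changed. `diff_attributes` below is a hypothetical
# helper; `current` is assumed to be the flattened dict from get_tg_attributes(),
# where '.' in attribute key names has been replaced by '_'.
def diff_attributes(current, desired):
    updates = []
    for api_key, value in desired.items():
        if value is None:
            continue  # parameter not supplied by the user: leave the attribute alone
        if str(value) != current.get(api_key.replace(".", "_")):
            updates.append({"Key": api_key, "Value": str(value)})
    return updates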
def create_or_update_target_group(connection, module):
-
changed = False
new_target_group = False
params = dict()
target_type = module.params.get("target_type")
- params['Name'] = module.params.get("name")
- params['TargetType'] = target_type
+ params["Name"] = module.params.get("name")
+ params["TargetType"] = target_type
if target_type != "lambda":
- params['Protocol'] = module.params.get("protocol").upper()
- if module.params.get('protocol_version') is not None:
- params['ProtocolVersion'] = module.params.get('protocol_version')
- params['Port'] = module.params.get("port")
- params['VpcId'] = module.params.get("vpc_id")
+ params["Protocol"] = module.params.get("protocol").upper()
+ if module.params.get("protocol_version") is not None:
+ params["ProtocolVersion"] = module.params.get("protocol_version")
+ params["Port"] = module.params.get("port")
+ params["VpcId"] = module.params.get("vpc_id")
tags = module.params.get("tags")
purge_tags = module.params.get("purge_tags")
health_option_keys = [
- "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout",
- "healthy_threshold_count", "unhealthy_threshold_count", "successful_response_codes"
+ "health_check_path",
+ "health_check_protocol",
+ "health_check_interval",
+ "health_check_timeout",
+ "healthy_threshold_count",
+ "unhealthy_threshold_count",
+ "successful_response_codes",
]
health_options = any(module.params[health_option_key] is not None for health_option_key in health_option_keys)
# Set health check if anything set
if health_options:
-
if module.params.get("health_check_protocol") is not None:
- params['HealthCheckProtocol'] = module.params.get("health_check_protocol").upper()
+ params["HealthCheckProtocol"] = module.params.get("health_check_protocol").upper()
if module.params.get("health_check_port") is not None:
- params['HealthCheckPort'] = module.params.get("health_check_port")
+ params["HealthCheckPort"] = module.params.get("health_check_port")
if module.params.get("health_check_interval") is not None:
- params['HealthCheckIntervalSeconds'] = module.params.get("health_check_interval")
+ params["HealthCheckIntervalSeconds"] = module.params.get("health_check_interval")
if module.params.get("health_check_timeout") is not None:
- params['HealthCheckTimeoutSeconds'] = module.params.get("health_check_timeout")
+ params["HealthCheckTimeoutSeconds"] = module.params.get("health_check_timeout")
if module.params.get("healthy_threshold_count") is not None:
- params['HealthyThresholdCount'] = module.params.get("healthy_threshold_count")
+ params["HealthyThresholdCount"] = module.params.get("healthy_threshold_count")
if module.params.get("unhealthy_threshold_count") is not None:
- params['UnhealthyThresholdCount'] = module.params.get("unhealthy_threshold_count")
+ params["UnhealthyThresholdCount"] = module.params.get("unhealthy_threshold_count")
# Only need to check response code and path for http(s) health checks
protocol = module.params.get("health_check_protocol")
- if protocol is not None and protocol.upper() in ['HTTP', 'HTTPS']:
-
+ if protocol is not None and protocol.upper() in ["HTTP", "HTTPS"]:
if module.params.get("health_check_path") is not None:
- params['HealthCheckPath'] = module.params.get("health_check_path")
+ params["HealthCheckPath"] = module.params.get("health_check_path")
if module.params.get("successful_response_codes") is not None:
- params['Matcher'] = {}
- code_key = 'HttpCode'
- protocol_version = module.params.get('protocol_version')
+ params["Matcher"] = {}
+ code_key = "HttpCode"
+ protocol_version = module.params.get("protocol_version")
if protocol_version is not None and protocol_version.upper() == "GRPC":
- code_key = 'GrpcCode'
- params['Matcher'][code_key] = module.params.get("successful_response_codes")
+ code_key = "GrpcCode"
+ params["Matcher"][code_key] = module.params.get("successful_response_codes")
# Get target group
target_group = get_target_group(connection, module)
if target_group:
- diffs = [param for param in ('Port', 'Protocol', 'VpcId')
- if target_group.get(param) != params.get(param)]
+ diffs = [param for param in ("Port", "Protocol", "VpcId") if target_group.get(param) != params.get(param)]
if diffs:
- module.fail_json(msg="Cannot modify %s parameter(s) for a target group" %
- ", ".join(diffs))
+ module.fail_json(msg=f"Cannot modify {', '.join(diffs)} parameter(s) for a target group")
# Target group exists so check health check parameters match what has been passed
health_check_params = dict()
# Modify health check if anything set
if health_options:
-
# Health check protocol
- if 'HealthCheckProtocol' in params and target_group['HealthCheckProtocol'] != params['HealthCheckProtocol']:
- health_check_params['HealthCheckProtocol'] = params['HealthCheckProtocol']
+ if "HealthCheckProtocol" in params and target_group["HealthCheckProtocol"] != params["HealthCheckProtocol"]:
+ health_check_params["HealthCheckProtocol"] = params["HealthCheckProtocol"]
# Health check port
- if 'HealthCheckPort' in params and target_group['HealthCheckPort'] != params['HealthCheckPort']:
- health_check_params['HealthCheckPort'] = params['HealthCheckPort']
+ if "HealthCheckPort" in params and target_group["HealthCheckPort"] != params["HealthCheckPort"]:
+ health_check_params["HealthCheckPort"] = params["HealthCheckPort"]
# Health check interval
- if 'HealthCheckIntervalSeconds' in params and target_group['HealthCheckIntervalSeconds'] != params['HealthCheckIntervalSeconds']:
- health_check_params['HealthCheckIntervalSeconds'] = params['HealthCheckIntervalSeconds']
+ if (
+ "HealthCheckIntervalSeconds" in params
+ and target_group["HealthCheckIntervalSeconds"] != params["HealthCheckIntervalSeconds"]
+ ):
+ health_check_params["HealthCheckIntervalSeconds"] = params["HealthCheckIntervalSeconds"]
# Health check timeout
- if 'HealthCheckTimeoutSeconds' in params and target_group['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']:
- health_check_params['HealthCheckTimeoutSeconds'] = params['HealthCheckTimeoutSeconds']
+ if (
+ "HealthCheckTimeoutSeconds" in params
+ and target_group["HealthCheckTimeoutSeconds"] != params["HealthCheckTimeoutSeconds"]
+ ):
+ health_check_params["HealthCheckTimeoutSeconds"] = params["HealthCheckTimeoutSeconds"]
# Healthy threshold
- if 'HealthyThresholdCount' in params and target_group['HealthyThresholdCount'] != params['HealthyThresholdCount']:
- health_check_params['HealthyThresholdCount'] = params['HealthyThresholdCount']
+ if (
+ "HealthyThresholdCount" in params
+ and target_group["HealthyThresholdCount"] != params["HealthyThresholdCount"]
+ ):
+ health_check_params["HealthyThresholdCount"] = params["HealthyThresholdCount"]
# Unhealthy threshold
- if 'UnhealthyThresholdCount' in params and target_group['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']:
- health_check_params['UnhealthyThresholdCount'] = params['UnhealthyThresholdCount']
+ if (
+ "UnhealthyThresholdCount" in params
+ and target_group["UnhealthyThresholdCount"] != params["UnhealthyThresholdCount"]
+ ):
+ health_check_params["UnhealthyThresholdCount"] = params["UnhealthyThresholdCount"]
# Only need to check response code and path for http(s) health checks
- if target_group['HealthCheckProtocol'] in ['HTTP', 'HTTPS']:
+ if target_group["HealthCheckProtocol"] in ["HTTP", "HTTPS"]:
# Health check path
- if 'HealthCheckPath' in params and target_group['HealthCheckPath'] != params['HealthCheckPath']:
- health_check_params['HealthCheckPath'] = params['HealthCheckPath']
+ if "HealthCheckPath" in params and target_group["HealthCheckPath"] != params["HealthCheckPath"]:
+ health_check_params["HealthCheckPath"] = params["HealthCheckPath"]
# Matcher (successful response codes)
            # TODO: is this check required, and does it belong here?
@@ -687,12 +723,14 @@ def create_or_update_target_group(connection, module):
current_matcher_list = target_group["Matcher"][code_key].split(",")
requested_matcher_list = params["Matcher"][code_key].split(",")
if set(current_matcher_list) != set(requested_matcher_list):
- health_check_params['Matcher'] = {}
- health_check_params['Matcher'][code_key] = ','.join(requested_matcher_list)
+ health_check_params["Matcher"] = {}
+ health_check_params["Matcher"][code_key] = ",".join(requested_matcher_list)
try:
if health_check_params:
- connection.modify_target_group(TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True, **health_check_params)
+ connection.modify_target_group(
+ TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True, **health_check_params
+ )
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't update target group")
@@ -703,27 +741,27 @@ def create_or_update_target_group(connection, module):
# describe_target_health seems to be the only way to get them
try:
current_targets = connection.describe_target_health(
- TargetGroupArn=target_group['TargetGroupArn'], aws_retry=True)
+ TargetGroupArn=target_group["TargetGroupArn"], aws_retry=True
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't get target group health")
if module.params.get("targets"):
-
if target_type != "lambda":
- params['Targets'] = module.params.get("targets")
+ params["Targets"] = module.params.get("targets")
# Correct type of target ports
- for target in params['Targets']:
- target['Port'] = int(target.get('Port', module.params.get('port')))
+ for target in params["Targets"]:
+ target["Port"] = int(target.get("Port", module.params.get("port")))
current_instance_ids = []
- for instance in current_targets['TargetHealthDescriptions']:
- current_instance_ids.append(instance['Target']['Id'])
+ for instance in current_targets["TargetHealthDescriptions"]:
+ current_instance_ids.append(instance["Target"]["Id"])
new_instance_ids = []
- for instance in params['Targets']:
- new_instance_ids.append(instance['Id'])
+ for instance in params["Targets"]:
+ new_instance_ids.append(instance["Id"])
add_instances = set(new_instance_ids) - set(current_instance_ids)
@@ -738,37 +776,49 @@ def create_or_update_target_group(connection, module):
changed = True
try:
- connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_add, aws_retry=True)
+ connection.register_targets(
+ TargetGroupArn=target_group["TargetGroupArn"], Targets=instances_to_add, aws_retry=True
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't register targets")
if module.params.get("wait"):
status_achieved, registered_instances = wait_for_status(
- connection, module, target_group['TargetGroupArn'], instances_to_add, 'healthy')
+ connection, module, target_group["TargetGroupArn"], instances_to_add, "healthy"
+ )
if not status_achieved:
module.fail_json(
- msg='Error waiting for target registration to be healthy - please check the AWS console')
+ msg="Error waiting for target registration to be healthy - please check the AWS console"
+ )
remove_instances = set(current_instance_ids) - set(new_instance_ids)
if remove_instances:
instances_to_remove = []
- for target in current_targets['TargetHealthDescriptions']:
- if target['Target']['Id'] in remove_instances:
- instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']})
+ for target in current_targets["TargetHealthDescriptions"]:
+ if target["Target"]["Id"] in remove_instances:
+ instances_to_remove.append(
+ {"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]}
+ )
changed = True
try:
- connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True)
+ connection.deregister_targets(
+ TargetGroupArn=target_group["TargetGroupArn"],
+ Targets=instances_to_remove,
+ aws_retry=True,
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't remove targets")
if module.params.get("wait"):
status_achieved, registered_instances = wait_for_status(
- connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused')
+ connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused"
+ )
if not status_achieved:
module.fail_json(
- msg='Error waiting for target deregistration - please check the AWS console')
+ msg="Error waiting for target deregistration - please check the AWS console"
+ )
# register lambda target
else:
@@ -786,40 +836,40 @@ def create_or_update_target_group(connection, module):
if changed:
if target.get("Id"):
response = connection.register_targets(
- TargetGroupArn=target_group['TargetGroupArn'],
- Targets=[
- {
- "Id": target['Id']
- }
- ],
- aws_retry=True
+ TargetGroupArn=target_group["TargetGroupArn"],
+ Targets=[{"Id": target["Id"]}],
+ aws_retry=True,
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg="Couldn't register targets")
+ module.fail_json_aws(e, msg="Couldn't register targets")
else:
if target_type != "lambda":
-
- current_instances = current_targets['TargetHealthDescriptions']
+ current_instances = current_targets["TargetHealthDescriptions"]
if current_instances:
instances_to_remove = []
- for target in current_targets['TargetHealthDescriptions']:
- instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']})
+ for target in current_targets["TargetHealthDescriptions"]:
+ instances_to_remove.append({"Id": target["Target"]["Id"], "Port": target["Target"]["Port"]})
changed = True
try:
- connection.deregister_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=instances_to_remove, aws_retry=True)
+ connection.deregister_targets(
+ TargetGroupArn=target_group["TargetGroupArn"],
+ Targets=instances_to_remove,
+ aws_retry=True,
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't remove targets")
if module.params.get("wait"):
status_achieved, registered_instances = wait_for_status(
- connection, module, target_group['TargetGroupArn'], instances_to_remove, 'unused')
+ connection, module, target_group["TargetGroupArn"], instances_to_remove, "unused"
+ )
if not status_achieved:
module.fail_json(
- msg='Error waiting for target deregistration - please check the AWS console')
+ msg="Error waiting for target deregistration - please check the AWS console"
+ )
# remove lambda targets
else:
@@ -830,7 +880,10 @@ def create_or_update_target_group(connection, module):
target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"]
if changed:
connection.deregister_targets(
- TargetGroupArn=target_group['TargetGroupArn'], Targets=[{"Id": target_to_remove}], aws_retry=True)
+ TargetGroupArn=target_group["TargetGroupArn"],
+ Targets=[{"Id": target_to_remove}],
+ aws_retry=True,
+ )
else:
try:
connection.create_target_group(aws_retry=True, **params)
@@ -843,33 +896,32 @@ def create_or_update_target_group(connection, module):
if module.params.get("targets"):
if target_type != "lambda":
- params['Targets'] = module.params.get("targets")
+ params["Targets"] = module.params.get("targets")
try:
- connection.register_targets(TargetGroupArn=target_group['TargetGroupArn'], Targets=params['Targets'], aws_retry=True)
+ connection.register_targets(
+ TargetGroupArn=target_group["TargetGroupArn"], Targets=params["Targets"], aws_retry=True
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't register targets")
if module.params.get("wait"):
- status_achieved, registered_instances = wait_for_status(connection, module, target_group['TargetGroupArn'], params['Targets'], 'healthy')
+ status_achieved, registered_instances = wait_for_status(
+ connection, module, target_group["TargetGroupArn"], params["Targets"], "healthy"
+ )
if not status_achieved:
- module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console')
+ module.fail_json(
+ msg="Error waiting for target registration to be healthy - please check the AWS console"
+ )
else:
try:
target = module.params.get("targets")[0]
response = connection.register_targets(
- TargetGroupArn=target_group['TargetGroupArn'],
- Targets=[
- {
- "Id": target["Id"]
- }
- ],
- aws_retry=True
+ TargetGroupArn=target_group["TargetGroupArn"], Targets=[{"Id": target["Id"]}], aws_retry=True
)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg="Couldn't register targets")
+ module.fail_json_aws(e, msg="Couldn't register targets")
attributes_update = create_or_update_attributes(connection, module, target_group, new_target_group)
@@ -879,13 +931,17 @@ def create_or_update_target_group(connection, module):
# Tags - only need to play with tags if tags parameter has been set to something
if tags is not None:
# Get tags
- current_tags = get_target_group_tags(connection, module, target_group['TargetGroupArn'])
+ current_tags = get_target_group_tags(connection, module, target_group["TargetGroupArn"])
# Delete necessary tags
- tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags)
+ tags_need_modify, tags_to_delete = compare_aws_tags(
+ boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags
+ )
if tags_to_delete:
try:
- connection.remove_tags(ResourceArns=[target_group['TargetGroupArn']], TagKeys=tags_to_delete, aws_retry=True)
+ connection.remove_tags(
+ ResourceArns=[target_group["TargetGroupArn"]], TagKeys=tags_to_delete, aws_retry=True
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete tags from target group")
changed = True
@@ -893,7 +949,11 @@ def create_or_update_target_group(connection, module):
# Add/update tags
if tags_need_modify:
try:
- connection.add_tags(ResourceArns=[target_group['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify), aws_retry=True)
+ connection.add_tags(
+ ResourceArns=[target_group["TargetGroupArn"]],
+ Tags=ansible_dict_to_boto3_tag_list(tags_need_modify),
+ aws_retry=True,
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't add tags to target group")
changed = True
@@ -902,12 +962,14 @@ def create_or_update_target_group(connection, module):
target_group = get_target_group(connection, module)
# Get the target group attributes again
- target_group.update(get_tg_attributes(connection, module, target_group['TargetGroupArn']))
+ target_group.update(get_tg_attributes(connection, module, target_group["TargetGroupArn"]))
# Convert target_group to snake_case
snaked_tg = camel_dict_to_snake_dict(target_group)
- snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, target_group['TargetGroupArn']))
+ snaked_tg["tags"] = boto3_tag_list_to_ansible_dict(
+ get_target_group_tags(connection, module, target_group["TargetGroupArn"])
+ )
module.exit_json(changed=changed, **snaked_tg)
@@ -918,7 +980,7 @@ def delete_target_group(connection, module):
if tg:
try:
- connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'], aws_retry=True)
+ connection.delete_target_group(TargetGroupArn=tg["TargetGroupArn"], aws_retry=True)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete target group")
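delete_target_group above keeps the operation idempotent: the group is looked up first, deleted only if it exists, and `changed` reports which branch ran. A compressed sketch of that shape, assuming `connection` is a retry-wrapped boto3 elbv2 client and `get_target_group()` returns None for a missing group (the helper name is illustrative):

    def delete_if_present(connection, tg):
        if not tg:
            return False  # already absent: nothing to delete, report changed=False
        connection.delete_target_group(TargetGroupArn=tg["TargetGroupArn"], aws_retry=True)
        return True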
@@ -927,66 +989,69 @@ def delete_target_group(connection, module):
def main():
- protocols_list = ['http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP',
- 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
+ protocols_list = ["http", "https", "tcp", "tls", "udp", "tcp_udp", "HTTP", "HTTPS", "TCP", "TLS", "UDP", "TCP_UDP"]
argument_spec = dict(
- deregistration_delay_timeout=dict(type='int'),
- deregistration_connection_termination=dict(type='bool', default=False),
+ deregistration_delay_timeout=dict(type="int"),
+ deregistration_connection_termination=dict(type="bool", default=False),
health_check_protocol=dict(choices=protocols_list),
health_check_port=dict(),
health_check_path=dict(),
- health_check_interval=dict(type='int'),
- health_check_timeout=dict(type='int'),
- healthy_threshold_count=dict(type='int'),
- modify_targets=dict(default=True, type='bool'),
+ health_check_interval=dict(type="int"),
+ health_check_timeout=dict(type="int"),
+ healthy_threshold_count=dict(type="int"),
+ modify_targets=dict(default=True, type="bool"),
name=dict(required=True),
- port=dict(type='int'),
+ port=dict(type="int"),
protocol=dict(choices=protocols_list),
- protocol_version=dict(type='str', choices=['GRPC', 'HTTP1', 'HTTP2']),
- purge_tags=dict(default=True, type='bool'),
- stickiness_enabled=dict(type='bool'),
+ protocol_version=dict(type="str", choices=["GRPC", "HTTP1", "HTTP2"]),
+ purge_tags=dict(default=True, type="bool"),
+ stickiness_enabled=dict(type="bool"),
stickiness_type=dict(),
- stickiness_lb_cookie_duration=dict(type='int'),
- stickiness_app_cookie_duration=dict(type='int'),
+ stickiness_lb_cookie_duration=dict(type="int"),
+ stickiness_app_cookie_duration=dict(type="int"),
stickiness_app_cookie_name=dict(),
- load_balancing_algorithm_type=dict(type='str', choices=['round_robin', 'least_outstanding_requests']),
- state=dict(required=True, choices=['present', 'absent']),
+ load_balancing_algorithm_type=dict(type="str", choices=["round_robin", "least_outstanding_requests"]),
+ state=dict(required=True, choices=["present", "absent"]),
successful_response_codes=dict(),
- tags=dict(type='dict', aliases=['resource_tags']),
- target_type=dict(choices=['instance', 'ip', 'lambda', 'alb']),
- targets=dict(type='list', elements='dict'),
- unhealthy_threshold_count=dict(type='int'),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ target_type=dict(choices=["instance", "ip", "lambda", "alb"]),
+ targets=dict(type="list", elements="dict"),
+ unhealthy_threshold_count=dict(type="int"),
vpc_id=dict(),
- preserve_client_ip_enabled=dict(type='bool'),
- proxy_protocol_v2_enabled=dict(type='bool'),
- wait_timeout=dict(type='int', default=200),
- wait=dict(type='bool', default=False)
+ preserve_client_ip_enabled=dict(type="bool"),
+ proxy_protocol_v2_enabled=dict(type="bool"),
+ wait_timeout=dict(type="int", default=200),
+ wait=dict(type="bool", default=False),
)
required_by = dict(
- health_check_path=['health_check_protocol'],
- successful_response_codes=['health_check_protocol'],
+ health_check_path=["health_check_protocol"],
+ successful_response_codes=["health_check_protocol"],
)
required_if = [
- ['target_type', 'instance', ['protocol', 'port', 'vpc_id']],
- ['target_type', 'ip', ['protocol', 'port', 'vpc_id']],
- ['target_type', 'alb', ['protocol', 'port', 'vpc_id']],
+ ["target_type", "instance", ["protocol", "port", "vpc_id"]],
+ ["target_type", "ip", ["protocol", "port", "vpc_id"]],
+ ["target_type", "alb", ["protocol", "port", "vpc_id"]],
]
module = AnsibleAWSModule(argument_spec=argument_spec, required_by=required_by, required_if=required_if)
- if module.params.get('target_type') is None:
- module.params['target_type'] = 'instance'
+ if module.params.get("target_type") is None:
+ module.params["target_type"] = "instance"
- connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ connection = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10))
- if module.params.get('state') == 'present':
- if module.params.get('protocol') in ['http', 'https', 'HTTP', 'HTTPS'] and module.params.get('deregistration_connection_termination', None):
- module.fail_json(msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination")
+ if module.params.get("state") == "present":
+ if module.params.get("protocol") in ["http", "https", "HTTP", "HTTPS"] and module.params.get(
+ "deregistration_connection_termination", None
+ ):
+ module.fail_json(
+ msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination"
+ )
create_or_update_target_group(connection, module)
else:
delete_target_group(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
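The guard in main() rejects deregistration_connection_termination for HTTP/S target groups before any API call is made, since connection termination on deregistration is an attribute of Network Load Balancer (TCP/TLS) target groups. A standalone sketch of the same check (function name illustrative, not part of the module):

    def validate_deregistration_termination(params):
        protocol = (params.get("protocol") or "").upper()
        if protocol in ("HTTP", "HTTPS") and params.get("deregistration_connection_termination"):
            raise ValueError(
                "A target group with HTTP/S protocol does not support setting"
                " deregistration_connection_termination"
            )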
diff --git a/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py b/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py
index 86cc03782..d0b013bfd 100644
--- a/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py
+++ b/ansible_collections/community/aws/plugins/modules/elb_target_group_info.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: elb_target_group_info
version_added: 1.0.0
short_description: Gather information about ELB target groups in AWS
description:
- - Gather information about ELB target groups in AWS
-author: Rob White (@wimnat)
+ - Gather information about ELB target groups in AWS
+author:
+ - Rob White (@wimnat)
options:
load_balancer_arn:
description:
@@ -40,13 +39,12 @@ options:
type: bool
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
+- amazon.aws.common.modules
+- amazon.aws.region.modules
- amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all target groups
@@ -61,10 +59,9 @@ EXAMPLES = r'''
names:
- tg1
- tg2
+"""
-'''
-
-RETURN = r'''
+RETURN = r"""
target_groups:
description: a list of target groups
returned: always
@@ -204,7 +201,7 @@ target_groups:
returned: always
type: str
sample: vpc-0123456
-'''
+"""
try:
import botocore
@@ -213,47 +210,48 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
@AWSRetry.jittered_backoff(retries=10)
def get_paginator(**kwargs):
- paginator = client.get_paginator('describe_target_groups')
+ paginator = client.get_paginator("describe_target_groups")
return paginator.paginate(**kwargs).build_full_result()
def get_target_group_attributes(target_group_arn):
-
try:
- target_group_attributes = boto3_tag_list_to_ansible_dict(client.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
+ target_group_attributes = boto3_tag_list_to_ansible_dict(
+ client.describe_target_group_attributes(TargetGroupArn=target_group_arn)["Attributes"]
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe target group attributes")
# Replace '.' with '_' in attribute key names to make it more Ansibley
- return dict((k.replace('.', '_'), v)
- for (k, v) in target_group_attributes.items())
+ return dict((k.replace(".", "_"), v) for (k, v) in target_group_attributes.items())
def get_target_group_tags(target_group_arn):
-
try:
- return boto3_tag_list_to_ansible_dict(client.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'])
+ return boto3_tag_list_to_ansible_dict(
+ client.describe_tags(ResourceArns=[target_group_arn])["TagDescriptions"][0]["Tags"]
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe group tags")
def get_target_group_targets_health(target_group_arn):
-
try:
- return client.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions']
+ return client.describe_target_health(TargetGroupArn=target_group_arn)["TargetHealthDescriptions"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get target health")
def list_target_groups():
-
load_balancer_arn = module.params.get("load_balancer_arn")
target_group_arns = module.params.get("target_group_arns")
names = module.params.get("names")
@@ -268,24 +266,29 @@ def list_target_groups():
target_groups = get_paginator(TargetGroupArns=target_group_arns)
if names:
target_groups = get_paginator(Names=names)
- except is_boto3_error_code('TargetGroupNotFound'):
+ except is_boto3_error_code("TargetGroupNotFound"):
module.exit_json(target_groups=[])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to list target groups")
# Get the attributes and tags for each target group
- for target_group in target_groups['TargetGroups']:
- target_group.update(get_target_group_attributes(target_group['TargetGroupArn']))
+ for target_group in target_groups["TargetGroups"]:
+ target_group.update(get_target_group_attributes(target_group["TargetGroupArn"]))
# Turn the boto3 result in to ansible_friendly_snaked_names
- snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
+ snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups["TargetGroups"]]
# Get tags for each target group
for snaked_target_group in snaked_target_groups:
- snaked_target_group['tags'] = get_target_group_tags(snaked_target_group['target_group_arn'])
+ snaked_target_group["tags"] = get_target_group_tags(snaked_target_group["target_group_arn"])
if collect_targets_health:
- snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict(
- target) for target in get_target_group_targets_health(snaked_target_group['target_group_arn'])]
+ snaked_target_group["targets_health_description"] = [
+ camel_dict_to_snake_dict(target)
+ for target in get_target_group_targets_health(snaked_target_group["target_group_arn"])
+ ]
module.exit_json(target_groups=snaked_target_groups)
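Before exiting, each boto3 target group dict is reshaped for Ansible: camelCase keys become snake_case via camel_dict_to_snake_dict, and the Key/Value tag list becomes a plain dict. For example:

    from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

    raw = {"TargetGroupName": "tg1", "HealthCheckPath": "/ping"}
    camel_dict_to_snake_dict(raw)
    # -> {'target_group_name': 'tg1', 'health_check_path': '/ping'}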
@@ -295,25 +298,25 @@ def main():
global client
argument_spec = dict(
- load_balancer_arn=dict(type='str'),
- target_group_arns=dict(type='list', elements='str'),
- names=dict(type='list', elements='str'),
- collect_targets_health=dict(default=False, type='bool', required=False),
+ load_balancer_arn=dict(type="str"),
+ target_group_arns=dict(type="list", elements="str"),
+ names=dict(type="list", elements="str"),
+ collect_targets_health=dict(default=False, type="bool", required=False),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
+ mutually_exclusive=[["load_balancer_arn", "target_group_arns", "names"]],
supports_check_mode=True,
)
try:
- client = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ client = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
list_target_groups()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
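The retry handling in elb_target_group_info wraps the whole paginate-and-collect call in AWSRetry.jittered_backoff, so throttling on any page restarts the full listing with jittered backoff instead of failing partway through. Roughly the same pattern outside the module would look like this (a sketch, assuming boto3 credentials are already configured):

    import boto3

    from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

    client = boto3.client("elbv2")

    @AWSRetry.jittered_backoff(retries=10)
    def describe_all_target_groups(**kwargs):
        paginator = client.get_paginator("describe_target_groups")
        return paginator.paginate(**kwargs).build_full_result()

    target_groups = describe_all_target_groups(Names=["tg1", "tg2"])["TargetGroups"]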
diff --git a/ansible_collections/community/aws/plugins/modules/elb_target_info.py b/ansible_collections/community/aws/plugins/modules/elb_target_info.py
index 4f91ac7f3..ad0b3c74b 100644
--- a/ansible_collections/community/aws/plugins/modules/elb_target_info.py
+++ b/ansible_collections/community/aws/plugins/modules/elb_target_info.py
@@ -1,10 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Yaakov Kuperman <ykuperman@gmail.com>
# GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: elb_target_info
version_added: 1.0.0
@@ -12,8 +12,8 @@ short_description: Gathers which target groups a target is associated with.
description:
- This module will search through every target group in a region to find
which ones have registered a given instance ID or IP.
-
-author: "Yaakov Kuperman (@yaakov-github)"
+author:
+ - "Yaakov Kuperman (@yaakov-github)"
options:
instance_id:
description:
@@ -25,109 +25,108 @@ options:
- Whether or not to get target groups not used by any load balancers.
type: bool
default: true
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = """
+EXAMPLES = r"""
# practical use case - dynamically de-registering and re-registering nodes
- - name: Get EC2 Metadata
- amazon.aws.ec2_metadata_facts:
-
- - name: Get initial list of target groups
- delegate_to: localhost
- community.aws.elb_target_info:
- instance_id: "{{ ansible_ec2_instance_id }}"
- region: "{{ ansible_ec2_placement_region }}"
- register: target_info
-
- - name: save fact for later
- ansible.builtin.set_fact:
- original_tgs: "{{ target_info.instance_target_groups }}"
-
- - name: Deregister instance from all target groups
- delegate_to: localhost
- community.aws.elb_target:
- target_group_arn: "{{ item.0.target_group_arn }}"
- target_port: "{{ item.1.target_port }}"
- target_az: "{{ item.1.target_az }}"
- target_id: "{{ item.1.target_id }}"
- state: absent
- target_status: "draining"
- region: "{{ ansible_ec2_placement_region }}"
- with_subelements:
- - "{{ original_tgs }}"
- - "targets"
-
- # This avoids having to wait for 'elb_target' to serially deregister each
- # target group. An alternative would be to run all of the 'elb_target'
- # tasks async and wait for them to finish.
-
- - name: wait for all targets to deregister simultaneously
- delegate_to: localhost
- community.aws.elb_target_info:
- get_unused_target_groups: false
- instance_id: "{{ ansible_ec2_instance_id }}"
- region: "{{ ansible_ec2_placement_region }}"
- register: target_info
- until: (target_info.instance_target_groups | length) == 0
- retries: 60
- delay: 10
-
- - name: reregister in elbv2s
- community.aws.elb_target:
- region: "{{ ansible_ec2_placement_region }}"
- target_group_arn: "{{ item.0.target_group_arn }}"
- target_port: "{{ item.1.target_port }}"
- target_az: "{{ item.1.target_az }}"
- target_id: "{{ item.1.target_id }}"
- state: present
- target_status: "initial"
- with_subelements:
- - "{{ original_tgs }}"
- - "targets"
-
- # wait until all groups associated with this instance are 'healthy' or
- # 'unused'
- - name: wait for registration
- community.aws.elb_target_info:
- get_unused_target_groups: false
- instance_id: "{{ ansible_ec2_instance_id }}"
- region: "{{ ansible_ec2_placement_region }}"
- register: target_info
- until: (target_info.instance_target_groups |
- map(attribute='targets') |
- flatten |
- map(attribute='target_health') |
- rejectattr('state', 'equalto', 'healthy') |
- rejectattr('state', 'equalto', 'unused') |
- list |
- length) == 0
- retries: 61
- delay: 10
+- name: Get EC2 Metadata
+ amazon.aws.ec2_metadata_facts:
+
+- name: Get initial list of target groups
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+
+- name: save fact for later
+ ansible.builtin.set_fact:
+ original_tgs: "{{ target_info.instance_target_groups }}"
+
+- name: Deregister instance from all target groups
+ delegate_to: localhost
+ community.aws.elb_target:
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: absent
+ target_status: "draining"
+ region: "{{ ansible_ec2_placement_region }}"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+ # This avoids having to wait for 'elb_target' to serially deregister each
+ # target group. An alternative would be to run all of the 'elb_target'
+ # tasks async and wait for them to finish.
+
+- name: wait for all targets to deregister simultaneously
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups | length) == 0
+ retries: 60
+ delay: 10
+
+- name: reregister in elbv2s
+ community.aws.elb_target:
+ region: "{{ ansible_ec2_placement_region }}"
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: present
+ target_status: "initial"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+# wait until all groups associated with this instance are 'healthy' or
+# 'unused'
+- name: wait for registration
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups |
+ map(attribute='targets') |
+ flatten |
+ map(attribute='target_health') |
+ rejectattr('state', 'equalto', 'healthy') |
+ rejectattr('state', 'equalto', 'unused') |
+ list |
+ length) == 0
+ retries: 61
+ delay: 10
# using the target groups to generate AWS CLI commands to reregister the
# instance - useful in case the playbook fails mid-run and manual
# rollback is required
- - name: "reregistration commands: ELBv2s"
- ansible.builtin.debug:
- msg: >
- aws --region {{ansible_ec2_placement_region}} elbv2
- register-targets --target-group-arn {{item.target_group_arn}}
- --targets{%for target in item.targets%}
- Id={{target.target_id}},
- Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}}
- {%endif%}
- {%endfor%}
- loop: "{{target_info.instance_target_groups}}"
-
+- name: "reregistration commands: ELBv2s"
+ ansible.builtin.debug:
+ msg: >
+ aws --region {{ansible_ec2_placement_region}} elbv2
+ register-targets --target-group-arn {{item.target_group_arn}}
+ --targets{%for target in item.targets%}
+ Id={{target.target_id}},
+ Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}}
+ {%endif%}
+ {%endfor%}
+ loop: "{{target_info.instance_target_groups}}"
"""
-RETURN = """
+RETURN = r"""
instance_target_groups:
  description: a list of target groups to which the instance is registered
returned: always
@@ -204,20 +203,23 @@ instance_target_groups:
type: str
"""
-__metaclass__ = type
-
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
# we can handle the lack of boto3 based on the ec2 module
pass
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class Target(object):
"""Models a target in a target group"""
+
def __init__(self, target_id, port, az, raw_target_health):
self.target_port = port
self.target_id = target_id
@@ -238,10 +240,7 @@ class TargetGroup(object):
self.targets = []
def add_target(self, target_id, target_port, target_az, raw_target_health):
- self.targets.append(Target(target_id,
- target_port,
- target_az,
- raw_target_health))
+ self.targets.append(Target(target_id, target_port, target_az, raw_target_health))
def to_dict(self):
object_dict = vars(self)
@@ -253,28 +252,17 @@ class TargetGroup(object):
class TargetInfoGatherer(object):
-
def __init__(self, module, instance_id, get_unused_target_groups):
self.module = module
try:
- self.ec2 = self.module.client(
- "ec2",
- retry_decorator=AWSRetry.jittered_backoff(retries=10)
- )
+ self.ec2 = self.module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10))
except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e,
- msg="Couldn't connect to ec2"
- )
+ self.module.fail_json_aws(e, msg="Couldn't connect to ec2")
try:
- self.elbv2 = self.module.client(
- "elbv2",
- retry_decorator=AWSRetry.jittered_backoff(retries=10)
- )
+ self.elbv2 = self.module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10))
except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e,
- msg="Could not connect to elbv2"
- )
+ self.module.fail_json_aws(e, msg="Could not connect to elbv2")
self.instance_id = instance_id
self.get_unused_target_groups = get_unused_target_groups
@@ -282,25 +270,19 @@ class TargetInfoGatherer(object):
def _get_instance_ips(self):
"""Fetch all IPs associated with this instance so that we can determine
- whether or not an instance is in an IP-based target group"""
+ whether or not an instance is in an IP-based target group"""
try:
# get ahold of the instance in the API
- reservations = self.ec2.describe_instances(
- InstanceIds=[self.instance_id],
- aws_retry=True
- )["Reservations"]
+ reservations = self.ec2.describe_instances(InstanceIds=[self.instance_id], aws_retry=True)["Reservations"]
except (BotoCoreError, ClientError) as e:
# typically this will happen if the instance doesn't exist
- self.module.fail_json_aws(e,
- msg="Could not get instance info" +
- " for instance '%s'" %
- (self.instance_id)
- )
+ self.module.fail_json_aws(
+ e,
+ msg=f"Could not get instance info for instance '{self.instance_id}'",
+ )
if len(reservations) < 1:
- self.module.fail_json(
- msg="Instance ID %s could not be found" % self.instance_id
- )
+ self.module.fail_json(msg=f"Instance ID {self.instance_id} could not be found")
instance = reservations[0]["Instances"][0]
@@ -317,38 +299,36 @@ class TargetInfoGatherer(object):
def _get_target_group_objects(self):
"""helper function to build a list of TargetGroup objects based on
- the AWS API"""
+ the AWS API"""
try:
- paginator = self.elbv2.get_paginator(
- "describe_target_groups"
- )
+ paginator = self.elbv2.get_paginator("describe_target_groups")
tg_response = paginator.paginate().build_full_result()
except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e,
- msg="Could not describe target" +
- " groups"
- )
+ self.module.fail_json_aws(
+ e,
+ msg="Could not describe target groups",
+ )
# build list of TargetGroup objects representing every target group in
# the system
target_groups = []
for each_tg in tg_response["TargetGroups"]:
- if not self.get_unused_target_groups and \
- len(each_tg["LoadBalancerArns"]) < 1:
+ if not self.get_unused_target_groups and len(each_tg["LoadBalancerArns"]) < 1:
# only collect target groups that actually are connected
# to LBs
continue
target_groups.append(
- TargetGroup(target_group_arn=each_tg["TargetGroupArn"],
- target_group_type=each_tg["TargetType"],
- )
+ TargetGroup(
+ target_group_arn=each_tg["TargetGroupArn"],
+ target_group_type=each_tg["TargetType"],
+ )
)
return target_groups
def _get_target_descriptions(self, target_groups):
"""Helper function to build a list of all the target descriptions
- for this target in a target group"""
+ for this target in a target group"""
# Build a list of all the target groups pointing to this instance
# based on the previous list
tgs = set()
@@ -356,37 +336,25 @@ class TargetInfoGatherer(object):
for tg in target_groups:
try:
# Get the list of targets for that target group
- response = self.elbv2.describe_target_health(
- TargetGroupArn=tg.target_group_arn,
- aws_retry=True
- )
+ response = self.elbv2.describe_target_health(TargetGroupArn=tg.target_group_arn, aws_retry=True)
except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e,
- msg="Could not describe target " +
- "health for target group %s" %
- tg.target_group_arn
- )
+ self.module.fail_json_aws(
+                    e, msg=f"Could not describe target health for target group {tg.target_group_arn}"
+ )
for t in response["TargetHealthDescriptions"]:
# If the target group has this instance as a target, add to
# list. This logic also accounts for the possibility of a
# target being in the target group multiple times with
# overridden ports
- if t["Target"]["Id"] == self.instance_id or \
- t["Target"]["Id"] in self.instance_ips:
-
+ if t["Target"]["Id"] == self.instance_id or t["Target"]["Id"] in self.instance_ips:
# The 'AvailabilityZone' parameter is a weird one, see the
# API docs for more. Basically it's only supposed to be
# there under very specific circumstances, so we need
# to account for that
- az = t["Target"]["AvailabilityZone"] \
- if "AvailabilityZone" in t["Target"] \
- else None
-
- tg.add_target(t["Target"]["Id"],
- t["Target"]["Port"],
- az,
- t["TargetHealth"])
+ az = t["Target"]["AvailabilityZone"] if "AvailabilityZone" in t["Target"] else None
+
+ tg.add_target(t["Target"]["Id"], t["Target"]["Port"], az, t["TargetHealth"])
# since tgs is a set, each target group will be added only
# once, even though we call add on each successful match
tgs.add(tg)
@@ -404,8 +372,7 @@ class TargetInfoGatherer(object):
def main():
argument_spec = dict(
instance_id={"required": True, "type": "str"},
- get_unused_target_groups={"required": False,
- "default": True, "type": "bool"}
+ get_unused_target_groups={"required": False, "default": True, "type": "bool"},
)
module = AnsibleAWSModule(
@@ -416,10 +383,7 @@ def main():
instance_id = module.params["instance_id"]
get_unused_target_groups = module.params["get_unused_target_groups"]
- tg_gatherer = TargetInfoGatherer(module,
- instance_id,
- get_unused_target_groups
- )
+ tg_gatherer = TargetInfoGatherer(module, instance_id, get_unused_target_groups)
instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs]
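The matching logic in _get_target_descriptions reduces to: a TargetHealthDescription belongs to the instance if its Id equals the instance ID (instance-type target groups) or one of the instance's IPs (ip-type target groups), and AvailabilityZone is only present on some targets, hence the conditional lookup. A reduced sketch (function name illustrative):

    def extract_target(desc, instance_id, instance_ips):
        """Return a simplified target dict if `desc` refers to this instance, else None."""
        target = desc["Target"]
        if target["Id"] != instance_id and target["Id"] not in instance_ips:
            return None
        return {
            "target_id": target["Id"],
            "target_port": target["Port"],
            "target_az": target.get("AvailabilityZone"),  # only present for some targets
            "target_health": desc["TargetHealth"],
        }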
diff --git a/ansible_collections/community/aws/plugins/modules/glue_connection.py b/ansible_collections/community/aws/plugins/modules/glue_connection.py
index bcfacb171..18039a861 100644
--- a/ansible_collections/community/aws/plugins/modules/glue_connection.py
+++ b/ansible_collections/community/aws/plugins/modules/glue_connection.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Rob White (@wimnat)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: glue_connection
version_added: 1.0.0
@@ -72,12 +70,12 @@ options:
- Required when I(connection_type=NETWORK).
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an AWS Glue connection
@@ -106,9 +104,9 @@ EXAMPLES = r'''
- community.aws.glue_connection:
name: my-glue-connection
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
connection_properties:
description:
- (deprecated) A dict of key-value pairs (converted to lowercase) used as parameters for this connection.
@@ -157,11 +155,11 @@ raw_connection_properties:
returned: when state is present
type: dict
sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'}
-'''
+"""
-# Non-ansible imports
import copy
import time
+
try:
import botocore
except ImportError:
@@ -169,10 +167,11 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def _get_glue_connection(connection, module):
@@ -187,13 +186,13 @@ def _get_glue_connection(connection, module):
connection_name = module.params.get("name")
connection_catalog_id = module.params.get("catalog_id")
- params = {'Name': connection_name}
+ params = {"Name": connection_name}
if connection_catalog_id is not None:
- params['CatalogId'] = connection_catalog_id
+ params["CatalogId"] = connection_catalog_id
try:
- return connection.get_connection(aws_retry=True, **params)['Connection']
- except is_boto3_error_code('EntityNotFoundException'):
+ return connection.get_connection(aws_retry=True, **params)["Connection"]
+ except is_boto3_error_code("EntityNotFoundException"):
return None
@@ -209,37 +208,50 @@ def _compare_glue_connection_params(user_params, current_params):
# Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
# To counter this, add the key if it's missing with a blank value
- if 'Description' not in current_params:
- current_params['Description'] = ""
- if 'MatchCriteria' not in current_params:
- current_params['MatchCriteria'] = list()
- if 'PhysicalConnectionRequirements' not in current_params:
- current_params['PhysicalConnectionRequirements'] = dict()
- current_params['PhysicalConnectionRequirements']['SecurityGroupIdList'] = []
- current_params['PhysicalConnectionRequirements']['SubnetId'] = ""
-
- if 'ConnectionProperties' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionProperties'] \
- != current_params['ConnectionProperties']:
+ if "Description" not in current_params:
+ current_params["Description"] = ""
+ if "MatchCriteria" not in current_params:
+ current_params["MatchCriteria"] = list()
+ if "PhysicalConnectionRequirements" not in current_params:
+ current_params["PhysicalConnectionRequirements"] = dict()
+ current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = []
+ current_params["PhysicalConnectionRequirements"]["SubnetId"] = ""
+
+ if (
+ "ConnectionProperties" in user_params["ConnectionInput"]
+ and user_params["ConnectionInput"]["ConnectionProperties"] != current_params["ConnectionProperties"]
+ ):
return True
- if 'ConnectionType' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionType'] \
- != current_params['ConnectionType']:
+ if (
+ "ConnectionType" in user_params["ConnectionInput"]
+ and user_params["ConnectionInput"]["ConnectionType"] != current_params["ConnectionType"]
+ ):
return True
- if 'Description' in user_params['ConnectionInput'] and user_params['ConnectionInput']['Description'] != current_params['Description']:
+ if (
+ "Description" in user_params["ConnectionInput"]
+ and user_params["ConnectionInput"]["Description"] != current_params["Description"]
+ ):
return True
- if 'MatchCriteria' in user_params['ConnectionInput'] and set(user_params['ConnectionInput']['MatchCriteria']) != set(current_params['MatchCriteria']):
+ if "MatchCriteria" in user_params["ConnectionInput"] and set(
+ user_params["ConnectionInput"]["MatchCriteria"]
+ ) != set(current_params["MatchCriteria"]):
return True
- if 'PhysicalConnectionRequirements' in user_params['ConnectionInput']:
- if 'SecurityGroupIdList' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
- set(user_params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList']) \
- != set(current_params['PhysicalConnectionRequirements']['SecurityGroupIdList']):
+ if "PhysicalConnectionRequirements" in user_params["ConnectionInput"]:
+ if "SecurityGroupIdList" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"] and set(
+ user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"]
+ ) != set(current_params["PhysicalConnectionRequirements"]["SecurityGroupIdList"]):
return True
- if 'SubnetId' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
- user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \
- != current_params['PhysicalConnectionRequirements']['SubnetId']:
+ if (
+ "SubnetId" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"]
+ and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"]
+ != current_params["PhysicalConnectionRequirements"]["SubnetId"]
+ ):
return True
- if 'AvailabilityZone' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
- user_params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] \
- != current_params['PhysicalConnectionRequirements']['AvailabilityZone']:
+ if (
+ "AvailabilityZone" in user_params["ConnectionInput"]["PhysicalConnectionRequirements"]
+ and user_params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"]
+ != current_params["PhysicalConnectionRequirements"]["AvailabilityZone"]
+ ):
return True
return False
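_compare_glue_connection_params returns True at the first field that drifted; scalar fields use plain inequality, while list-valued fields (MatchCriteria, SecurityGroupIdList) are compared as sets so ordering alone never reports a change. The shape of that comparison, reduced to a sketch (names illustrative):

    def connection_drifted(user_input, current):
        for key in ("ConnectionProperties", "ConnectionType", "Description"):
            if key in user_input and user_input[key] != current.get(key):
                return True
        if "MatchCriteria" in user_input:
            if set(user_input["MatchCriteria"]) != set(current.get("MatchCriteria", [])):
                return True
        return False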
@@ -253,11 +265,11 @@ def _await_glue_connection(connection, module):
while wait_timeout > time.time():
glue_connection = _get_glue_connection(connection, module)
- if glue_connection and glue_connection.get('Name'):
+ if glue_connection and glue_connection.get("Name"):
return glue_connection
time.sleep(check_interval)
- module.fail_json(msg='Timeout waiting for Glue connection %s' % module.params.get('name'))
+ module.fail_json(msg=f"Timeout waiting for Glue connection {module.params.get('name')}")
def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection):
@@ -272,26 +284,30 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co
changed = False
params = dict()
- params['ConnectionInput'] = dict()
- params['ConnectionInput']['Name'] = module.params.get("name")
- params['ConnectionInput']['ConnectionType'] = module.params.get("connection_type")
- params['ConnectionInput']['ConnectionProperties'] = module.params.get("connection_properties")
+ params["ConnectionInput"] = dict()
+ params["ConnectionInput"]["Name"] = module.params.get("name")
+ params["ConnectionInput"]["ConnectionType"] = module.params.get("connection_type")
+ params["ConnectionInput"]["ConnectionProperties"] = module.params.get("connection_properties")
if module.params.get("catalog_id") is not None:
- params['CatalogId'] = module.params.get("catalog_id")
+ params["CatalogId"] = module.params.get("catalog_id")
if module.params.get("description") is not None:
- params['ConnectionInput']['Description'] = module.params.get("description")
+ params["ConnectionInput"]["Description"] = module.params.get("description")
if module.params.get("match_criteria") is not None:
- params['ConnectionInput']['MatchCriteria'] = module.params.get("match_criteria")
+ params["ConnectionInput"]["MatchCriteria"] = module.params.get("match_criteria")
if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None:
- params['ConnectionInput']['PhysicalConnectionRequirements'] = dict()
+ params["ConnectionInput"]["PhysicalConnectionRequirements"] = dict()
if module.params.get("security_groups") is not None:
# Get security group IDs from names
- security_group_ids = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True)
- params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids
+ security_group_ids = get_ec2_security_group_ids_from_names(
+ module.params.get("security_groups"), connection_ec2, boto3=True
+ )
+ params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = security_group_ids
if module.params.get("subnet_id") is not None:
- params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id")
+ params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] = module.params.get("subnet_id")
if module.params.get("availability_zone") is not None:
- params['ConnectionInput']['PhysicalConnectionRequirements']['AvailabilityZone'] = module.params.get("availability_zone")
+ params["ConnectionInput"]["PhysicalConnectionRequirements"]["AvailabilityZone"] = module.params.get(
+ "availability_zone"
+ )
# If glue_connection is not None then check if it needs to be modified, else create it
if glue_connection:
@@ -299,7 +315,7 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co
try:
# We need to slightly modify the params for an update
update_params = copy.deepcopy(params)
- update_params['Name'] = update_params['ConnectionInput']['Name']
+ update_params["Name"] = update_params["ConnectionInput"]["Name"]
if not module.check_mode:
connection.update_connection(aws_retry=True, **update_params)
changed = True
@@ -318,12 +334,19 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co
glue_connection = _await_glue_connection(connection, module)
if glue_connection:
- module.deprecate("The 'connection_properties' return key is deprecated and will be replaced"
- " by 'raw_connection_properties'. Both values are returned for now.",
- date='2024-06-01', collection_name='community.aws')
- glue_connection['RawConnectionProperties'] = glue_connection['ConnectionProperties']
+ module.deprecate(
+ (
+ "The 'connection_properties' return key is deprecated and will be replaced"
+ " by 'raw_connection_properties'. Both values are returned for now."
+ ),
+ date="2024-06-01",
+ collection_name="community.aws",
+ )
+ glue_connection["RawConnectionProperties"] = glue_connection["ConnectionProperties"]
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=['RawConnectionProperties']))
+ module.exit_json(
+ changed=changed, **camel_dict_to_snake_dict(glue_connection or {}, ignore_list=["RawConnectionProperties"])
+ )
def delete_glue_connection(connection, module, glue_connection):
@@ -337,9 +360,9 @@ def delete_glue_connection(connection, module, glue_connection):
"""
changed = False
- params = {'ConnectionName': module.params.get("name")}
+ params = {"ConnectionName": module.params.get("name")}
if module.params.get("catalog_id") is not None:
- params['CatalogId'] = module.params.get("catalog_id")
+ params["CatalogId"] = module.params.get("catalog_id")
if glue_connection:
try:
@@ -353,41 +376,41 @@ def delete_glue_connection(connection, module, glue_connection):
def main():
-
- argument_spec = (
- dict(
- availability_zone=dict(type='str'),
- catalog_id=dict(type='str'),
- connection_properties=dict(type='dict'),
- connection_type=dict(type='str', default='JDBC', choices=['CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK']),
- description=dict(type='str'),
- match_criteria=dict(type='list', elements='str'),
- name=dict(required=True, type='str'),
- security_groups=dict(type='list', elements='str'),
- state=dict(required=True, choices=['present', 'absent'], type='str'),
- subnet_id=dict(type='str')
- )
+ argument_spec = dict(
+ availability_zone=dict(type="str"),
+ catalog_id=dict(type="str"),
+ connection_properties=dict(type="dict"),
+ connection_type=dict(
+ type="str", default="JDBC", choices=["CUSTOM", "JDBC", "KAFKA", "MARKETPLACE", "MONGODB", "NETWORK"]
+ ),
+ description=dict(type="str"),
+ match_criteria=dict(type="list", elements="str"),
+ name=dict(required=True, type="str"),
+ security_groups=dict(type="list", elements="str"),
+ state=dict(required=True, choices=["present", "absent"], type="str"),
+ subnet_id=dict(type="str"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[
- ('state', 'present', ['connection_properties']),
- ('connection_type', 'NETWORK', ['availability_zone', 'security_groups', 'subnet_id'])
- ],
- supports_check_mode=True
- )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ("state", "present", ["connection_properties"]),
+ ("connection_type", "NETWORK", ["availability_zone", "security_groups", "subnet_id"]),
+ ],
+ supports_check_mode=True,
+ )
retry_decorator = AWSRetry.jittered_backoff(retries=10)
- connection_glue = module.client('glue', retry_decorator=retry_decorator)
- connection_ec2 = module.client('ec2', retry_decorator=retry_decorator)
+ connection_glue = module.client("glue", retry_decorator=retry_decorator)
+ connection_ec2 = module.client("ec2", retry_decorator=retry_decorator)
glue_connection = _get_glue_connection(connection_glue, module)
- if module.params.get("state") == 'present':
+ if module.params.get("state") == "present":
create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection)
else:
delete_glue_connection(connection_glue, module, glue_connection)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
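
The glue_connection hunks above are largely a black-style/f-string reformat, but they also tighten the change detection: PhysicalConnectionRequirements is now only inspected when the user actually supplied one. A minimal standalone sketch of that compare-before-update pattern, with invented dict contents rather than a real AWS response:

def physical_requirements_changed(user_params, current_params):
    """Return True when a user-supplied PhysicalConnectionRequirements field differs."""
    user_pcr = user_params.get("ConnectionInput", {}).get("PhysicalConnectionRequirements")
    if not user_pcr:
        # Nothing was supplied, so there is nothing to compare.
        return False
    current_pcr = current_params.get("PhysicalConnectionRequirements", {})
    # Security group ordering is not significant, so compare as sets.
    if "SecurityGroupIdList" in user_pcr:
        if set(user_pcr["SecurityGroupIdList"]) != set(current_pcr.get("SecurityGroupIdList", [])):
            return True
    # Scalar fields are compared only when the user actually set them.
    for key in ("SubnetId", "AvailabilityZone"):
        if key in user_pcr and user_pcr[key] != current_pcr.get(key):
            return True
    return False

user = {"ConnectionInput": {"PhysicalConnectionRequirements": {"SubnetId": "subnet-aaaa1111"}}}
current = {"PhysicalConnectionRequirements": {"SubnetId": "subnet-bbbb2222"}}
print(physical_requirements_changed(user, current))  # True -> update_connection would be called
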
diff --git a/ansible_collections/community/aws/plugins/modules/glue_crawler.py b/ansible_collections/community/aws/plugins/modules/glue_crawler.py
index a47b8eb3f..5d92219df 100644
--- a/ansible_collections/community/aws/plugins/modules/glue_crawler.py
+++ b/ansible_collections/community/aws/plugins/modules/glue_crawler.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Rob White (@wimnat)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: glue_crawler
version_added: 4.1.0
@@ -77,13 +75,13 @@ options:
- Required when I(state=present).
type: dict
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an AWS Glue crawler
@@ -109,9 +107,9 @@ EXAMPLES = r'''
- community.aws.glue_crawler:
name: my-glue-crawler
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
creation_time:
description: The time and date that this crawler definition was created.
returned: when state is present
@@ -198,7 +196,7 @@ targets:
description: List of catalog targets.
returned: when state is present
type: list
-'''
+"""
try:
import botocore
@@ -208,22 +206,26 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def _get_glue_crawler(connection, module, glue_crawler_name):
- '''
+ """
Get an AWS Glue crawler based on name. If not found, return None.
- '''
+ """
try:
- return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)['Crawler']
- except is_boto3_error_code('EntityNotFoundException'):
+ return connection.get_crawler(aws_retry=True, Name=glue_crawler_name)["Crawler"]
+ except is_boto3_error_code("EntityNotFoundException"):
return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
@@ -239,39 +241,58 @@ def _trim_target(target):
if not target:
return None
retval = target.copy()
- if not retval.get('Exclusions', None):
- retval.pop('Exclusions', None)
+ if not retval.get("Exclusions", None):
+ retval.pop("Exclusions", None)
return retval
def _compare_glue_crawler_params(user_params, current_params):
- '''
+ """
Compare Glue crawler params. If there is a difference, return True immediately else return False
- '''
- if 'DatabaseName' in user_params and user_params['DatabaseName'] != current_params['DatabaseName']:
+ """
+ if "DatabaseName" in user_params and user_params["DatabaseName"] != current_params["DatabaseName"]:
return True
- if 'Description' in user_params and user_params['Description'] != current_params['Description']:
+ if "Description" in user_params and user_params["Description"] != current_params["Description"]:
return True
- if 'RecrawlPolicy' in user_params and user_params['RecrawlPolicy'] != current_params['RecrawlPolicy']:
+ if "RecrawlPolicy" in user_params and user_params["RecrawlPolicy"] != current_params["RecrawlPolicy"]:
return True
- if 'Role' in user_params and user_params['Role'] != current_params['Role']:
+ if "Role" in user_params and user_params["Role"] != current_params["Role"]:
return True
- if 'SchemaChangePolicy' in user_params and user_params['SchemaChangePolicy'] != current_params['SchemaChangePolicy']:
+ if (
+ "SchemaChangePolicy" in user_params
+ and user_params["SchemaChangePolicy"] != current_params["SchemaChangePolicy"]
+ ):
return True
- if 'TablePrefix' in user_params and user_params['TablePrefix'] != current_params['TablePrefix']:
+ if "TablePrefix" in user_params and user_params["TablePrefix"] != current_params["TablePrefix"]:
return True
- if 'Targets' in user_params:
- if 'S3Targets' in user_params['Targets']:
- if _trim_targets(user_params['Targets']['S3Targets']) != _trim_targets(current_params['Targets']['S3Targets']):
+ if "Targets" in user_params:
+ if "S3Targets" in user_params["Targets"]:
+ if _trim_targets(user_params["Targets"]["S3Targets"]) != _trim_targets(
+ current_params["Targets"]["S3Targets"]
+ ):
return True
- if 'JdbcTargets' in user_params['Targets'] and user_params['Targets']['JdbcTargets'] != current_params['Targets']['JdbcTargets']:
- if _trim_targets(user_params['Targets']['JdbcTargets']) != _trim_targets(current_params['Targets']['JdbcTargets']):
+ if (
+ "JdbcTargets" in user_params["Targets"]
+ and user_params["Targets"]["JdbcTargets"] != current_params["Targets"]["JdbcTargets"]
+ ):
+ if _trim_targets(user_params["Targets"]["JdbcTargets"]) != _trim_targets(
+ current_params["Targets"]["JdbcTargets"]
+ ):
return True
- if 'MongoDBTargets' in user_params['Targets'] and user_params['Targets']['MongoDBTargets'] != current_params['Targets']['MongoDBTargets']:
+ if (
+ "MongoDBTargets" in user_params["Targets"]
+ and user_params["Targets"]["MongoDBTargets"] != current_params["Targets"]["MongoDBTargets"]
+ ):
return True
- if 'DynamoDBTargets' in user_params['Targets'] and user_params['Targets']['DynamoDBTargets'] != current_params['Targets']['DynamoDBTargets']:
+ if (
+ "DynamoDBTargets" in user_params["Targets"]
+ and user_params["Targets"]["DynamoDBTargets"] != current_params["Targets"]["DynamoDBTargets"]
+ ):
return True
- if 'CatalogTargets' in user_params['Targets'] and user_params['Targets']['CatalogTargets'] != current_params['Targets']['CatalogTargets']:
+ if (
+ "CatalogTargets" in user_params["Targets"]
+ and user_params["Targets"]["CatalogTargets"] != current_params["Targets"]["CatalogTargets"]
+ ):
return True
return False
@@ -280,21 +301,23 @@ def _compare_glue_crawler_params(user_params, current_params):
def ensure_tags(connection, module, glue_crawler):
changed = False
- if module.params.get('tags') is None:
+ if module.params.get("tags") is None:
return False
account_id, partition = get_aws_account_info(module)
- arn = 'arn:{0}:glue:{1}:{2}:crawler/{3}'.format(partition, module.region, account_id, module.params.get('name'))
+ arn = f"arn:{partition}:glue:{module.region}:{account_id}:crawler/{module.params.get('name')}"
try:
- existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {})
+ existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {})
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if module.check_mode:
existing_tags = {}
else:
- module.fail_json_aws(e, msg='Unable to get tags for Glue crawler %s' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to get tags for Glue crawler {module.params.get('name')}")
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags'))
+ tags_to_add, tags_to_remove = compare_aws_tags(
+ existing_tags, module.params.get("tags"), module.params.get("purge_tags")
+ )
if tags_to_remove:
changed = True
@@ -302,7 +325,7 @@ def ensure_tags(connection, module, glue_crawler):
try:
connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}")
if tags_to_add:
changed = True
@@ -310,35 +333,37 @@ def ensure_tags(connection, module, glue_crawler):
try:
connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for Glue crawler %s' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}")
return changed
def create_or_update_glue_crawler(connection, module, glue_crawler):
- '''
+ """
Create or update an AWS Glue crawler
- '''
+ """
changed = False
params = dict()
- params['Name'] = module.params.get('name')
- params['Role'] = module.params.get('role')
- params['Targets'] = module.params.get('targets')
- if module.params.get('database_name') is not None:
- params['DatabaseName'] = module.params.get('database_name')
- if module.params.get('description') is not None:
- params['Description'] = module.params.get('description')
- if module.params.get('recrawl_policy') is not None:
- params['RecrawlPolicy'] = snake_dict_to_camel_dict(module.params.get('recrawl_policy'), capitalize_first=True)
- if module.params.get('role') is not None:
- params['Role'] = module.params.get('role')
- if module.params.get('schema_change_policy') is not None:
- params['SchemaChangePolicy'] = snake_dict_to_camel_dict(module.params.get('schema_change_policy'), capitalize_first=True)
- if module.params.get('table_prefix') is not None:
- params['TablePrefix'] = module.params.get('table_prefix')
- if module.params.get('targets') is not None:
- params['Targets'] = module.params.get('targets')
+ params["Name"] = module.params.get("name")
+ params["Role"] = module.params.get("role")
+ params["Targets"] = module.params.get("targets")
+ if module.params.get("database_name") is not None:
+ params["DatabaseName"] = module.params.get("database_name")
+ if module.params.get("description") is not None:
+ params["Description"] = module.params.get("description")
+ if module.params.get("recrawl_policy") is not None:
+ params["RecrawlPolicy"] = snake_dict_to_camel_dict(module.params.get("recrawl_policy"), capitalize_first=True)
+ if module.params.get("role") is not None:
+ params["Role"] = module.params.get("role")
+ if module.params.get("schema_change_policy") is not None:
+ params["SchemaChangePolicy"] = snake_dict_to_camel_dict(
+ module.params.get("schema_change_policy"), capitalize_first=True
+ )
+ if module.params.get("table_prefix") is not None:
+ params["TablePrefix"] = module.params.get("table_prefix")
+ if module.params.get("targets") is not None:
+ params["Targets"] = module.params.get("targets")
if glue_crawler:
if _compare_glue_crawler_params(params, glue_crawler):
@@ -356,23 +381,26 @@ def create_or_update_glue_crawler(connection, module, glue_crawler):
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e)
- glue_crawler = _get_glue_crawler(connection, module, params['Name'])
+ glue_crawler = _get_glue_crawler(connection, module, params["Name"])
changed |= ensure_tags(connection, module, glue_crawler)
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=['SchemaChangePolicy', 'RecrawlPolicy', 'Targets']))
+ module.exit_json(
+ changed=changed,
+ **camel_dict_to_snake_dict(glue_crawler or {}, ignore_list=["SchemaChangePolicy", "RecrawlPolicy", "Targets"]),
+ )
def delete_glue_crawler(connection, module, glue_crawler):
- '''
+ """
Delete an AWS Glue crawler
- '''
+ """
changed = False
if glue_crawler:
try:
if not module.check_mode:
- connection.delete_crawler(aws_retry=True, Name=glue_crawler['Name'])
+ connection.delete_crawler(aws_retry=True, Name=glue_crawler["Name"])
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e)
@@ -381,46 +409,39 @@ def delete_glue_crawler(connection, module, glue_crawler):
def main():
-
- argument_spec = (
- dict(
- database_name=dict(type='str'),
- description=dict(type='str'),
- name=dict(required=True, type='str'),
- purge_tags=dict(type='bool', default=True),
- recrawl_policy=dict(type='dict', options=dict(
- recrawl_behavior=dict(type='str')
- )),
- role=dict(type='str'),
- schema_change_policy=dict(type='dict', options=dict(
- delete_behavior=dict(type='str'),
- update_behavior=dict(type='str')
- )),
- state=dict(required=True, choices=['present', 'absent'], type='str'),
- table_prefix=dict(type='str'),
- tags=dict(type='dict', aliases=['resource_tags']),
- targets=dict(type='dict')
- )
+ argument_spec = dict(
+ database_name=dict(type="str"),
+ description=dict(type="str"),
+ name=dict(required=True, type="str"),
+ purge_tags=dict(type="bool", default=True),
+ recrawl_policy=dict(type="dict", options=dict(recrawl_behavior=dict(type="str"))),
+ role=dict(type="str"),
+ schema_change_policy=dict(
+ type="dict", options=dict(delete_behavior=dict(type="str"), update_behavior=dict(type="str"))
+ ),
+ state=dict(required=True, choices=["present", "absent"], type="str"),
+ table_prefix=dict(type="str"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ targets=dict(type="dict"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[
- ('state', 'present', ['role', 'targets'])
- ],
- supports_check_mode=True
- )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[("state", "present", ["role", "targets"])],
+ supports_check_mode=True,
+ )
- connection = module.client('glue', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ connection = module.client("glue", retry_decorator=AWSRetry.jittered_backoff(retries=10))
- state = module.params.get('state')
+ state = module.params.get("state")
- glue_crawler = _get_glue_crawler(connection, module, module.params.get('name'))
+ glue_crawler = _get_glue_crawler(connection, module, module.params.get("name"))
- if state == 'present':
+ if state == "present":
create_or_update_glue_crawler(connection, module, glue_crawler)
else:
delete_glue_crawler(connection, module, glue_crawler)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
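
glue_crawler (and glue_job below) share the same ensure_tags() flow: build the resource ARN with an f-string, read the live tags, and split the difference into additions and removals. The helper below is an illustrative reimplementation of the compare_aws_tags() semantics used here, not the real amazon.aws function:

def split_tag_changes(existing_tags, desired_tags, purge_tags=True):
    # Keys whose value is new or changed must be (re)applied.
    tags_to_add = {k: v for k, v in desired_tags.items() if existing_tags.get(k) != v}
    # With purge_tags enabled, keys absent from the desired set are removed.
    tags_to_remove = [k for k in existing_tags if k not in desired_tags] if purge_tags else []
    return tags_to_add, tags_to_remove

existing = {"env": "dev", "owner": "data-eng"}
desired = {"env": "prod"}
print(split_tag_changes(existing, desired))
# ({'env': 'prod'}, ['owner']) -> fed to tag_resource / untag_resource respectively
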
diff --git a/ansible_collections/community/aws/plugins/modules/glue_job.py b/ansible_collections/community/aws/plugins/modules/glue_job.py
index 47d6156d7..256779975 100644
--- a/ansible_collections/community/aws/plugins/modules/glue_job.py
+++ b/ansible_collections/community/aws/plugins/modules/glue_job.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Rob White (@wimnat)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: glue_job
version_added: 1.0.0
@@ -103,13 +101,13 @@ options:
notes:
- Support for I(tags) and I(purge_tags) was added in release 2.2.0.
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an AWS Glue job
@@ -126,9 +124,9 @@ EXAMPLES = r'''
- community.aws.glue_job:
name: my-glue-job
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
allocated_capacity:
description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to
100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power
@@ -223,10 +221,10 @@ timeout:
returned: when state is present
type: int
sample: 300
-'''
+"""
-# Non-ansible imports
import copy
+
try:
import botocore
except ImportError:
@@ -234,11 +232,12 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def _get_glue_job(connection, module, glue_job_name):
@@ -251,10 +250,13 @@ def _get_glue_job(connection, module, glue_job_name):
:return: boto3 Glue job dict or None if not found
"""
try:
- return connection.get_job(aws_retry=True, JobName=glue_job_name)['Job']
- except is_boto3_error_code('EntityNotFoundException'):
+ return connection.get_job(aws_retry=True, JobName=glue_job_name)["Job"]
+ except is_boto3_error_code("EntityNotFoundException"):
return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
@@ -269,39 +271,43 @@ def _compare_glue_job_params(user_params, current_params):
# Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
# To counter this, add the key if it's missing with a blank value
- if 'Description' not in current_params:
- current_params['Description'] = ""
- if 'DefaultArguments' not in current_params:
- current_params['DefaultArguments'] = dict()
+ if "Description" not in current_params:
+ current_params["Description"] = ""
+ if "DefaultArguments" not in current_params:
+ current_params["DefaultArguments"] = dict()
- if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']:
+ if "AllocatedCapacity" in user_params and user_params["AllocatedCapacity"] != current_params["AllocatedCapacity"]:
return True
- if 'Command' in user_params:
- if user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']:
+ if "Command" in user_params:
+ if user_params["Command"]["ScriptLocation"] != current_params["Command"]["ScriptLocation"]:
return True
- if user_params['Command']['PythonVersion'] != current_params['Command']['PythonVersion']:
+ if user_params["Command"]["PythonVersion"] != current_params["Command"]["PythonVersion"]:
return True
- if 'Connections' in user_params and user_params['Connections'] != current_params['Connections']:
+ if "Connections" in user_params and user_params["Connections"] != current_params["Connections"]:
return True
- if 'DefaultArguments' in user_params and user_params['DefaultArguments'] != current_params['DefaultArguments']:
+ if "DefaultArguments" in user_params and user_params["DefaultArguments"] != current_params["DefaultArguments"]:
return True
- if 'Description' in user_params and user_params['Description'] != current_params['Description']:
+ if "Description" in user_params and user_params["Description"] != current_params["Description"]:
return True
- if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']:
+ if (
+ "ExecutionProperty" in user_params
+ and user_params["ExecutionProperty"]["MaxConcurrentRuns"]
+ != current_params["ExecutionProperty"]["MaxConcurrentRuns"]
+ ):
return True
- if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']:
+ if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]:
return True
- if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']:
+ if "MaxRetries" in user_params and user_params["MaxRetries"] != current_params["MaxRetries"]:
return True
- if 'Role' in user_params and user_params['Role'] != current_params['Role']:
+ if "Role" in user_params and user_params["Role"] != current_params["Role"]:
return True
- if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']:
+ if "Timeout" in user_params and user_params["Timeout"] != current_params["Timeout"]:
return True
- if 'GlueVersion' in user_params and user_params['GlueVersion'] != current_params['GlueVersion']:
+ if "GlueVersion" in user_params and user_params["GlueVersion"] != current_params["GlueVersion"]:
return True
- if 'WorkerType' in user_params and user_params['WorkerType'] != current_params['WorkerType']:
+ if "WorkerType" in user_params and user_params["WorkerType"] != current_params["WorkerType"]:
return True
- if 'NumberOfWorkers' in user_params and user_params['NumberOfWorkers'] != current_params['NumberOfWorkers']:
+ if "NumberOfWorkers" in user_params and user_params["NumberOfWorkers"] != current_params["NumberOfWorkers"]:
return True
return False
@@ -310,21 +316,23 @@ def _compare_glue_job_params(user_params, current_params):
def ensure_tags(connection, module, glue_job):
changed = False
- if module.params.get('tags') is None:
+ if module.params.get("tags") is None:
return False
account_id, partition = get_aws_account_info(module)
- arn = 'arn:{0}:glue:{1}:{2}:job/{3}'.format(partition, module.region, account_id, module.params.get('name'))
+ arn = f"arn:{partition}:glue:{module.region}:{account_id}:job/{module.params.get('name')}"
try:
- existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get('Tags', {})
+ existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {})
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if module.check_mode:
existing_tags = {}
else:
- module.fail_json_aws(e, msg='Unable to get tags for Glue job %s' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to get tags for Glue job {module.params.get('name')}")
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags'))
+ tags_to_add, tags_to_remove = compare_aws_tags(
+ existing_tags, module.params.get("tags"), module.params.get("purge_tags")
+ )
if tags_to_remove:
changed = True
@@ -332,7 +340,7 @@ def ensure_tags(connection, module, glue_job):
try:
connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to set tags for Glue job {module.params.get('name')}")
if tags_to_add:
changed = True
@@ -340,7 +348,7 @@ def ensure_tags(connection, module, glue_job):
try:
connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for Glue job %s' % module.params.get('name'))
+ module.fail_json_aws(e, msg=f"Unable to set tags for Glue job {module.params.get('name')}")
return changed
@@ -357,42 +365,45 @@ def create_or_update_glue_job(connection, module, glue_job):
changed = False
params = dict()
- params['Name'] = module.params.get("name")
- params['Role'] = module.params.get("role")
+ params["Name"] = module.params.get("name")
+ params["Role"] = module.params.get("role")
if module.params.get("allocated_capacity") is not None:
- params['AllocatedCapacity'] = module.params.get("allocated_capacity")
+ params["AllocatedCapacity"] = module.params.get("allocated_capacity")
if module.params.get("command_script_location") is not None:
- params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")}
+ params["Command"] = {
+ "Name": module.params.get("command_name"),
+ "ScriptLocation": module.params.get("command_script_location"),
+ }
if module.params.get("command_python_version") is not None:
- params['Command']['PythonVersion'] = module.params.get("command_python_version")
+ params["Command"]["PythonVersion"] = module.params.get("command_python_version")
if module.params.get("connections") is not None:
- params['Connections'] = {'Connections': module.params.get("connections")}
+ params["Connections"] = {"Connections": module.params.get("connections")}
if module.params.get("default_arguments") is not None:
- params['DefaultArguments'] = module.params.get("default_arguments")
+ params["DefaultArguments"] = module.params.get("default_arguments")
if module.params.get("description") is not None:
- params['Description'] = module.params.get("description")
+ params["Description"] = module.params.get("description")
if module.params.get("glue_version") is not None:
- params['GlueVersion'] = module.params.get("glue_version")
+ params["GlueVersion"] = module.params.get("glue_version")
if module.params.get("max_concurrent_runs") is not None:
- params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")}
+ params["ExecutionProperty"] = {"MaxConcurrentRuns": module.params.get("max_concurrent_runs")}
if module.params.get("max_retries") is not None:
- params['MaxRetries'] = module.params.get("max_retries")
+ params["MaxRetries"] = module.params.get("max_retries")
if module.params.get("timeout") is not None:
- params['Timeout'] = module.params.get("timeout")
+ params["Timeout"] = module.params.get("timeout")
if module.params.get("glue_version") is not None:
- params['GlueVersion'] = module.params.get("glue_version")
+ params["GlueVersion"] = module.params.get("glue_version")
if module.params.get("worker_type") is not None:
- params['WorkerType'] = module.params.get("worker_type")
+ params["WorkerType"] = module.params.get("worker_type")
if module.params.get("number_of_workers") is not None:
- params['NumberOfWorkers'] = module.params.get("number_of_workers")
+ params["NumberOfWorkers"] = module.params.get("number_of_workers")
# If glue_job is not None then check if it needs to be modified, else create it
if glue_job:
if _compare_glue_job_params(params, glue_job):
try:
# Update job needs slightly modified params
- update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)}
- del update_params['JobUpdate']['Name']
+ update_params = {"JobName": params["Name"], "JobUpdate": copy.deepcopy(params)}
+ del update_params["JobUpdate"]["Name"]
if not module.check_mode:
connection.update_job(aws_retry=True, **update_params)
changed = True
@@ -406,11 +417,11 @@ def create_or_update_glue_job(connection, module, glue_job):
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e)
- glue_job = _get_glue_job(connection, module, params['Name'])
+ glue_job = _get_glue_job(connection, module, params["Name"])
changed |= ensure_tags(connection, module, glue_job)
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=['DefaultArguments']))
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job or {}, ignore_list=["DefaultArguments"]))
def delete_glue_job(connection, module, glue_job):
@@ -427,7 +438,7 @@ def delete_glue_job(connection, module, glue_job):
if glue_job:
try:
if not module.check_mode:
- connection.delete_job(aws_retry=True, JobName=glue_job['Name'])
+ connection.delete_job(aws_retry=True, JobName=glue_job["Name"])
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e)
@@ -436,49 +447,45 @@ def delete_glue_job(connection, module, glue_job):
def main():
-
- argument_spec = (
- dict(
- allocated_capacity=dict(type='int'),
- command_name=dict(type='str', default='glueetl'),
- command_python_version=dict(type='str'),
- command_script_location=dict(type='str'),
- connections=dict(type='list', elements='str'),
- default_arguments=dict(type='dict'),
- description=dict(type='str'),
- glue_version=dict(type='str'),
- max_concurrent_runs=dict(type='int'),
- max_retries=dict(type='int'),
- name=dict(required=True, type='str'),
- number_of_workers=dict(type='int'),
- purge_tags=dict(type='bool', default=True),
- role=dict(type='str'),
- state=dict(required=True, choices=['present', 'absent'], type='str'),
- tags=dict(type='dict', aliases=['resource_tags']),
- timeout=dict(type='int'),
- worker_type=dict(choices=['Standard', 'G.1X', 'G.2X'], type='str'),
- )
+ argument_spec = dict(
+ allocated_capacity=dict(type="int"),
+ command_name=dict(type="str", default="glueetl"),
+ command_python_version=dict(type="str"),
+ command_script_location=dict(type="str"),
+ connections=dict(type="list", elements="str"),
+ default_arguments=dict(type="dict"),
+ description=dict(type="str"),
+ glue_version=dict(type="str"),
+ max_concurrent_runs=dict(type="int"),
+ max_retries=dict(type="int"),
+ name=dict(required=True, type="str"),
+ number_of_workers=dict(type="int"),
+ purge_tags=dict(type="bool", default=True),
+ role=dict(type="str"),
+ state=dict(required=True, choices=["present", "absent"], type="str"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ timeout=dict(type="int"),
+ worker_type=dict(choices=["Standard", "G.1X", "G.2X"], type="str"),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[
- ('state', 'present', ['role', 'command_script_location'])
- ],
- supports_check_mode=True
- )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[("state", "present", ["role", "command_script_location"])],
+ supports_check_mode=True,
+ )
retry_decorator = AWSRetry.jittered_backoff(retries=10)
- connection = module.client('glue', retry_decorator=retry_decorator)
+ connection = module.client("glue", retry_decorator=retry_decorator)
state = module.params.get("state")
glue_job = _get_glue_job(connection, module, module.params.get("name"))
- if state == 'present':
+ if state == "present":
create_or_update_glue_job(connection, module, glue_job)
else:
delete_glue_job(connection, module, glue_job)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
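
One behavioural detail preserved through the glue_job reformat: boto3's create_job() takes Name at the top level, while update_job() expects JobName plus a JobUpdate dict that must not contain Name. A short sketch of that reshaping step, with illustrative parameters:

import copy

def to_update_params(params):
    """Reshape create_job() parameters into the update_job() form."""
    update_params = {"JobName": params["Name"], "JobUpdate": copy.deepcopy(params)}
    # Name is not accepted inside JobUpdate, so drop it from the copy.
    del update_params["JobUpdate"]["Name"]
    return update_params

params = {"Name": "my-glue-job", "Role": "my-glue-role", "MaxRetries": 1}
print(to_update_params(params))
# {'JobName': 'my-glue-job', 'JobUpdate': {'Role': 'my-glue-role', 'MaxRetries': 1}}
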
diff --git a/ansible_collections/community/aws/plugins/modules/iam_access_key.py b/ansible_collections/community/aws/plugins/modules/iam_access_key.py
deleted file mode 100644
index ad61b5b2a..000000000
--- a/ansible_collections/community/aws/plugins/modules/iam_access_key.py
+++ /dev/null
@@ -1,317 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2021 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: iam_access_key
-version_added: 2.1.0
-short_description: Manage AWS IAM User access keys
-description:
- - Manage AWS IAM user access keys.
-author: Mark Chappell (@tremble)
-options:
- user_name:
- description:
- - The name of the IAM User to which the key belongs.
- required: true
- type: str
- aliases: ['username']
- id:
- description:
- - The ID of the access key.
- - Required when I(state=absent).
- - Mutually exclusive with I(rotate_keys).
- required: false
- type: str
- state:
- description:
- - Create or remove the access key.
- - When I(state=present) and I(id) is not defined a new key will be created.
- required: false
- type: str
- default: 'present'
- choices: [ 'present', 'absent' ]
- active:
- description:
- - Whether the key should be enabled or disabled.
- - Defaults to C(true) when creating a new key.
- required: false
- type: bool
- aliases: ['enabled']
- rotate_keys:
- description:
- - When there are already 2 access keys attached to the IAM user, the oldest
- key will be removed and a new key created.
- - Ignored if I(state=absent)
- - Mutually exclusive with I(id).
- required: false
- type: bool
- default: false
-
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
-
-EXAMPLES = r'''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create a new access key
- community.aws.iam_access_key:
- user_name: example_user
- state: present
-
-- name: Delete the access_key
- community.aws.iam_access_key:
- user_name: example_user
- id: AKIA1EXAMPLE1EXAMPLE
- state: absent
-'''
-
-RETURN = r'''
-access_key:
- description: A dictionary containing all the access key information.
- returned: When the key exists.
- type: complex
- contains:
- access_key_id:
- description: The ID for the access key.
- returned: success
- type: str
- sample: AKIA1EXAMPLE1EXAMPLE
- create_date:
- description: The date and time, in ISO 8601 date-time format, when the access key was created.
- returned: success
- type: str
- sample: "2021-10-09T13:25:42+00:00"
- user_name:
- description: The name of the IAM user to which the key is attached.
- returned: success
- type: str
- sample: example_user
- status:
- description:
- - The status of the key.
- - C(Active) means it can be used.
- - C(Inactive) means it can not be used.
- returned: success
- type: str
- sample: Inactive
-secret_access_key:
- description:
- - The secret access key.
- - A secret access key is the equivalent of a password which can not be changed and as such should be considered sensitive data.
- - Secret access keys can only be accessed at creation time.
- returned: When a new key is created.
- type: str
- sample: example/Example+EXAMPLE+example/Example
-deleted_access_key_id:
- description:
- - The access key deleted during rotation.
- returned: When a key was deleted during the rotation of access keys
- type: str
- sample: AKIA1EXAMPLE1EXAMPLE
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
-from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-
-
-def delete_access_key(access_keys, user, access_key_id):
- if not access_key_id:
- return False
-
- if access_key_id not in access_keys:
- return False
-
- if module.check_mode:
- return True
-
- try:
- client.delete_access_key(
- aws_retry=True,
- UserName=user,
- AccessKeyId=access_key_id,
- )
- except is_boto3_error_code('NoSuchEntityException'):
- # Generally occurs when race conditions have happened and someone
- # deleted the key while we were checking to see if it existed.
- return False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(
- e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user)
- )
-
- return True
-
-
-def update_access_key(access_keys, user, access_key_id, enabled):
- if access_key_id not in access_keys:
- module.fail_json(
- msg='Access key "{0}" not found attached to User "{1}"'.format(access_key_id, user),
- )
-
- changes = dict()
- access_key = access_keys.get(access_key_id)
-
- if enabled is not None:
- desired_status = 'Active' if enabled else 'Inactive'
- if access_key.get('status') != desired_status:
- changes['Status'] = desired_status
-
- if not changes:
- return False
-
- if module.check_mode:
- return True
-
- try:
- client.update_access_key(
- aws_retry=True,
- UserName=user,
- AccessKeyId=access_key_id,
- **changes
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, changes=changes,
- msg='Failed to update access key "{0}" for user "{1}"'.format(access_key_id, user),
- )
- return True
-
-
-def create_access_key(access_keys, user, rotate_keys, enabled):
- changed = False
- oldest_key = False
-
- if len(access_keys) > 1 and rotate_keys:
- sorted_keys = sorted(list(access_keys), key=lambda k: access_keys[k].get('create_date', None))
- oldest_key = sorted_keys[0]
- changed |= delete_access_key(access_keys, user, oldest_key)
-
- if module.check_mode:
- if changed:
- return dict(deleted_access_key=oldest_key)
- return True
-
- try:
- results = client.create_access_key(aws_retry=True, UserName=user)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to create access key for user "{0}"'.format(user))
- results = camel_dict_to_snake_dict(results)
- access_key = results.get('access_key')
- access_key = normalize_boto3_result(access_key)
-
- # Update settings which can't be managed on creation
- if enabled is False:
- access_key_id = access_key['access_key_id']
- access_keys = {access_key_id: access_key}
- update_access_key(access_keys, user, access_key_id, enabled)
- access_key['status'] = 'Inactive'
-
- if oldest_key:
- access_key['deleted_access_key'] = oldest_key
-
- return access_key
-
-
-def get_access_keys(user):
- try:
- results = client.list_access_keys(aws_retry=True, UserName=user)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg='Failed to get access keys for user "{0}"'.format(user)
- )
- if not results:
- return None
-
- results = camel_dict_to_snake_dict(results)
- access_keys = results.get('access_key_metadata', [])
- if not access_keys:
- return []
-
- access_keys = normalize_boto3_result(access_keys)
- access_keys = {k['access_key_id']: k for k in access_keys}
- return access_keys
-
-
-def main():
-
- global module
- global client
-
- argument_spec = dict(
- user_name=dict(required=True, type='str', aliases=['username']),
- id=dict(required=False, type='str'),
- state=dict(required=False, choices=['present', 'absent'], default='present'),
- active=dict(required=False, type='bool', aliases=['enabled']),
- rotate_keys=dict(required=False, type='bool', default=False),
- )
-
- required_if = [
- ['state', 'absent', ('id')],
- ]
- mutually_exclusive = [
- ['rotate_keys', 'id'],
- ]
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
-
- changed = False
- state = module.params.get('state')
- user = module.params.get('user_name')
- access_key_id = module.params.get('id')
- rotate_keys = module.params.get('rotate_keys')
- enabled = module.params.get('active')
-
- access_keys = get_access_keys(user)
- results = dict()
-
- if state == 'absent':
- changed |= delete_access_key(access_keys, user, access_key_id)
- else:
- # If we have an ID then we should try to update it
- if access_key_id:
- changed |= update_access_key(access_keys, user, access_key_id, enabled)
- access_keys = get_access_keys(user)
- results['access_key'] = access_keys.get(access_key_id, None)
- # Otherwise we try to create a new one
- else:
- secret_key = create_access_key(access_keys, user, rotate_keys, enabled)
- if isinstance(secret_key, bool):
- changed |= secret_key
- else:
- changed = True
- results['access_key_id'] = secret_key.get('access_key_id', None)
- results['secret_access_key'] = secret_key.pop('secret_access_key', None)
- results['deleted_access_key_id'] = secret_key.pop('deleted_access_key', None)
- if secret_key:
- results['access_key'] = secret_key
- results = scrub_none_parameters(results)
-
- module.exit_json(changed=changed, **results)
-
-
-if __name__ == '__main__':
- main()
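
The deleted iam_access_key module rotated credentials by sorting the user's keys on create_date and removing the oldest before creating a replacement (only when rotate_keys is set and two keys already exist). A minimal sketch of that selection step, with made-up key data:

access_keys = {
    "AKIA1EXAMPLE1EXAMPLE": {"create_date": "2021-10-09T13:25:42+00:00"},
    "AKIA2EXAMPLE2EXAMPLE": {"create_date": "2023-01-02T08:00:00+00:00"},
}

# ISO 8601 timestamps sort chronologically as plain strings.
oldest_key = sorted(access_keys, key=lambda k: access_keys[k]["create_date"])[0]
print(oldest_key)  # AKIA1EXAMPLE1EXAMPLE -> deleted first when rotating
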
diff --git a/ansible_collections/community/aws/plugins/modules/iam_access_key_info.py b/ansible_collections/community/aws/plugins/modules/iam_access_key_info.py
deleted file mode 100644
index 91429eff9..000000000
--- a/ansible_collections/community/aws/plugins/modules/iam_access_key_info.py
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2021 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: iam_access_key_info
-version_added: 2.1.0
-short_description: fetch information about AWS IAM User access keys
-description:
- - 'Fetches information about AWS IAM user access keys.'
- - 'Note: It is not possible to fetch the secret access key.'
-author: Mark Chappell (@tremble)
-options:
- user_name:
- description:
- - The name of the IAM User to which the keys belong.
- required: true
- type: str
- aliases: ['username']
-
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
-
-EXAMPLES = r'''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Fetch Access keys for a user
- community.aws.iam_access_key_info:
- user_name: example_user
-'''
-
-RETURN = r'''
-access_key:
- description: A dictionary containing all the access key information.
- returned: When the key exists.
- type: list
- elements: dict
- contains:
- access_key_id:
- description: The ID for the access key.
- returned: success
- type: str
- sample: AKIA1EXAMPLE1EXAMPLE
- create_date:
- description: The date and time, in ISO 8601 date-time format, when the access key was created.
- returned: success
- type: str
- sample: "2021-10-09T13:25:42+00:00"
- user_name:
- description: The name of the IAM user to which the key is attached.
- returned: success
- type: str
- sample: example_user
- status:
- description:
- - The status of the key.
- - C(Active) means it can be used.
- - C(Inactive) means it can not be used.
- returned: success
- type: str
- sample: Inactive
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-
-
-def get_access_keys(user):
- try:
- results = client.list_access_keys(aws_retry=True, UserName=user)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg='Failed to get access keys for user "{0}"'.format(user)
- )
- if not results:
- return None
-
- results = camel_dict_to_snake_dict(results)
- access_keys = results.get('access_key_metadata', [])
- if not access_keys:
- return []
-
- access_keys = normalize_boto3_result(access_keys)
- access_keys = sorted(access_keys, key=lambda d: d.get('create_date', None))
- return access_keys
-
-
-def main():
-
- global module
- global client
-
- argument_spec = dict(
- user_name=dict(required=True, type='str', aliases=['username']),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
-
- changed = False
- user = module.params.get('user_name')
- access_keys = get_access_keys(user)
-
- module.exit_json(changed=changed, access_keys=access_keys)
-
-
-if __name__ == '__main__':
- main()
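
As elsewhere in this patch, the deleted info module converted boto3's CamelCase response keys to snake_case with camel_dict_to_snake_dict. A quick demonstration of that helper (it ships with ansible-core; the sample metadata is invented):

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

metadata = {"AccessKeyId": "AKIA1EXAMPLE1EXAMPLE", "UserName": "example_user", "Status": "Active"}
print(camel_dict_to_snake_dict(metadata))
# {'access_key_id': 'AKIA1EXAMPLE1EXAMPLE', 'user_name': 'example_user', 'status': 'Active'}
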
diff --git a/ansible_collections/community/aws/plugins/modules/iam_group.py b/ansible_collections/community/aws/plugins/modules/iam_group.py
deleted file mode 100644
index 31987ef1d..000000000
--- a/ansible_collections/community/aws/plugins/modules/iam_group.py
+++ /dev/null
@@ -1,433 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: iam_group
-version_added: 1.0.0
-short_description: Manage AWS IAM groups
-description:
- - Manage AWS IAM groups.
-author:
-- Nick Aslanidis (@naslanidis)
-- Maksym Postument (@infectsoldier)
-options:
- name:
- description:
- - The name of the group to create.
- required: true
- type: str
- managed_policies:
- description:
- - A list of managed policy ARNs or friendly names to attach to the role.
- - To embed an inline policy, use M(community.aws.iam_policy).
- required: false
- type: list
- elements: str
- default: []
- aliases: ['managed_policy']
- users:
- description:
- - A list of existing users to add as members of the group.
- required: false
- type: list
- elements: str
- default: []
- state:
- description:
- - Create or remove the IAM group.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
- purge_policies:
- description:
- - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
- required: false
- default: false
- type: bool
- aliases: ['purge_policy', 'purge_managed_policies']
- purge_users:
- description:
- - When I(purge_users=true) users which are not included in I(users) will be detached.
- required: false
- default: false
- type: bool
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
-
-EXAMPLES = r'''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create a group
- community.aws.iam_group:
- name: testgroup1
- state: present
-
-- name: Create a group and attach a managed policy using its ARN
- community.aws.iam_group:
- name: testgroup1
- managed_policies:
- - arn:aws:iam::aws:policy/AmazonSNSFullAccess
- state: present
-
-- name: Create a group with users as members and attach a managed policy using its ARN
- community.aws.iam_group:
- name: testgroup1
- managed_policies:
- - arn:aws:iam::aws:policy/AmazonSNSFullAccess
- users:
- - test_user1
- - test_user2
- state: present
-
-- name: Remove all managed policies from an existing group with an empty list
- community.aws.iam_group:
- name: testgroup1
- state: present
- purge_policies: true
-
-- name: Remove all group members from an existing group
- community.aws.iam_group:
- name: testgroup1
- managed_policies:
- - arn:aws:iam::aws:policy/AmazonSNSFullAccess
- purge_users: true
- state: present
-
-- name: Delete the group
- community.aws.iam_group:
- name: testgroup1
- state: absent
-
-'''
-RETURN = r'''
-iam_group:
- description: dictionary containing all the group information including group membership
- returned: success
- type: complex
- contains:
- group:
- description: dictionary containing all the group information
- returned: success
- type: complex
- contains:
- arn:
- description: the Amazon Resource Name (ARN) specifying the group
- type: str
- sample: "arn:aws:iam::1234567890:group/testgroup1"
- create_date:
- description: the date and time, in ISO 8601 date-time format, when the group was created
- type: str
- sample: "2017-02-08T04:36:28+00:00"
- group_id:
- description: the stable and unique string identifying the group
- type: str
- sample: AGPA12345EXAMPLE54321
- group_name:
- description: the friendly name that identifies the group
- type: str
- sample: testgroup1
- path:
- description: the path to the group
- type: str
- sample: /
- users:
- description: list containing all the group members
- returned: success
- type: complex
- contains:
- arn:
- description: the Amazon Resource Name (ARN) specifying the user
- type: str
- sample: "arn:aws:iam::1234567890:user/test_user1"
- create_date:
- description: the date and time, in ISO 8601 date-time format, when the user was created
- type: str
- sample: "2017-02-08T04:36:28+00:00"
- user_id:
- description: the stable and unique string identifying the user
- type: str
- sample: AIDA12345EXAMPLE54321
- user_name:
- description: the friendly name that identifies the user
- type: str
- sample: testgroup1
- path:
- description: the path to the user
- type: str
- sample: /
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-
-
-def compare_attached_group_policies(current_attached_policies, new_attached_policies):
-
- # If new_attached_policies is None it means we want to remove all policies
- if len(current_attached_policies) > 0 and new_attached_policies is None:
- return False
-
- current_attached_policies_arn_list = []
- for policy in current_attached_policies:
- current_attached_policies_arn_list.append(policy['PolicyArn'])
-
- if set(current_attached_policies_arn_list) == set(new_attached_policies):
- return True
- else:
- return False
-
-
-def compare_group_members(current_group_members, new_group_members):
-
- # If new_group_members is None it means we want to remove all group members
- if len(current_group_members) > 0 and new_group_members is None:
- return False
- if set(current_group_members) == set(new_group_members):
- return True
- else:
- return False
-
-
-def convert_friendly_names_to_arns(connection, module, policy_names):
-
- if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None):
- return policy_names
- allpolicies = {}
- paginator = connection.get_paginator('list_policies')
- policies = paginator.paginate().build_full_result()['Policies']
-
- for policy in policies:
- allpolicies[policy['PolicyName']] = policy['Arn']
- allpolicies[policy['Arn']] = policy['Arn']
- try:
- return [allpolicies[policy] for policy in policy_names]
- except KeyError as e:
- module.fail_json(msg="Couldn't find policy: " + str(e))
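-# For example, using the AWS managed policy from the EXAMPLES above, this maps
-# ['AmazonSNSFullAccess'] -> ['arn:aws:iam::aws:policy/AmazonSNSFullAccess'];
-# a list containing only full ARNs is returned unchanged.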
-
-
-def create_or_update_group(connection, module):
-
- params = dict()
- params['GroupName'] = module.params.get('name')
- managed_policies = module.params.get('managed_policies')
- users = module.params.get('users')
- purge_users = module.params.get('purge_users')
- purge_policies = module.params.get('purge_policies')
- changed = False
- if managed_policies:
- managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
-
- # Get group
- try:
- group = get_group(connection, module, params['GroupName'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get group")
-
- # If group is None, create it
- if group is None:
- # Check mode means we would create the group
- if module.check_mode:
- module.exit_json(changed=True)
-
- try:
- group = connection.create_group(**params)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create group")
-
- # Manage managed policies
- current_attached_policies = get_attached_policy_list(connection, module, params['GroupName'])
- if not compare_attached_group_policies(current_attached_policies, managed_policies):
- current_attached_policies_arn_list = []
- for policy in current_attached_policies:
- current_attached_policies_arn_list.append(policy['PolicyArn'])
-
- # With purge_policies, detach any attached policies missing from the requested list;
- # the documented managed_policies=[None] sentinel therefore removes them all
- if purge_policies:
- # Detach policies not present
- for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
- changed = True
- if not module.check_mode:
- try:
- connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params['GroupName'])
- # If there are policies to adjust that aren't in the current list, then things have changed
- # Otherwise the only changes were in purging above
- if set(managed_policies) - set(current_attached_policies_arn_list):
- changed = True
- # If there are policies in managed_policies attach each policy
- if managed_policies != [None] and not module.check_mode:
- for policy_arn in managed_policies:
- try:
- connection.attach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params['GroupName'])
-
- # Manage group memberships
- try:
- current_group_members = get_group(connection, module, params['GroupName'])['Users']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
-
- current_group_members_list = []
- for member in current_group_members:
- current_group_members_list.append(member['UserName'])
-
- if not compare_group_members(current_group_members_list, users):
-
- if purge_users:
- for user in list(set(current_group_members_list) - set(users)):
- # Ensure we mark things have changed if any user gets purged
- changed = True
- # Skip actions for check mode
- if not module.check_mode:
- try:
- connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't remove user %s from group %s" % (user, params['GroupName']))
- # If there are users to adjust that aren't in the current list, then things have changed
- # Otherwise the only changes were in purging above
- if set(users) - set(current_group_members_list):
- changed = True
- # Skip actions for check mode
- if users != [None] and not module.check_mode:
- for user in users:
- try:
- connection.add_user_to_group(GroupName=params['GroupName'], UserName=user)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params['GroupName']))
- if module.check_mode:
- module.exit_json(changed=changed)
-
- # Get the group again
- try:
- group = get_group(connection, module, params['GroupName'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
-
- module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group))
-
-
-def destroy_group(connection, module):
-
- params = dict()
- params['GroupName'] = module.params.get('name')
-
- try:
- group = get_group(connection, module, params['GroupName'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
- if group:
- # Check mode means we would remove this group
- if module.check_mode:
- module.exit_json(changed=True)
-
- # Remove any attached policies otherwise deletion fails
- try:
- for policy in get_attached_policy_list(connection, module, params['GroupName']):
- connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy['PolicyArn'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params['GroupName'])
-
- # Remove any users in the group otherwise deletion fails
- current_group_members_list = []
- try:
- current_group_members = get_group(connection, module, params['GroupName'])['Users']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
- for member in current_group_members:
- current_group_members_list.append(member['UserName'])
- for user in current_group_members_list:
- try:
- connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params['GroupName']))
-
- try:
- connection.delete_group(**params)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't delete group %s" % params['GroupName'])
-
- else:
- module.exit_json(changed=False)
-
- module.exit_json(changed=True)
-
-
-@AWSRetry.exponential_backoff()
-def get_group(connection, module, name):
- try:
- paginator = connection.get_paginator('get_group')
- return paginator.paginate(GroupName=name).build_full_result()
- except is_boto3_error_code('NoSuchEntity'):
- return None
-
-
-@AWSRetry.exponential_backoff()
-def get_attached_policy_list(connection, module, name):
-
- try:
- paginator = connection.get_paginator('list_attached_group_policies')
- return paginator.paginate(GroupName=name).build_full_result()['AttachedPolicies']
- except is_boto3_error_code('NoSuchEntity'):
- return None
-
-
-def main():
-
- argument_spec = dict(
- name=dict(required=True),
- managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'),
- users=dict(default=[], type='list', elements='str'),
- state=dict(choices=['present', 'absent'], required=True),
- purge_users=dict(default=False, type='bool'),
- purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies'])
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- connection = module.client('iam')
-
- state = module.params.get("state")
-
- if state == 'present':
- create_or_update_group(connection, module)
- else:
- destroy_group(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py b/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py
deleted file mode 100644
index f86f019d5..000000000
--- a/ansible_collections/community/aws/plugins/modules/iam_managed_policy.py
+++ /dev/null
@@ -1,371 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: iam_managed_policy
-version_added: 1.0.0
-short_description: Manage User Managed IAM policies
-description:
- - Allows creating and removing managed IAM policies
-options:
- policy_name:
- description:
- - The name of the managed policy.
- required: True
- type: str
- policy_description:
- description:
- - A helpful description of this policy; this value is immutable and can only be set when creating a new policy.
- default: ''
- type: str
- policy:
- description:
- - A properly JSON-formatted policy document.
- type: json
- make_default:
- description:
- - Make this revision the default revision.
- default: True
- type: bool
- only_version:
- description:
- - Remove all other non-default revisions. If used together with C(make_default), all other versions of this policy will be deleted.
- type: bool
- default: false
- state:
- description:
- - Should this managed policy be present or absent. Set to absent to detach all entities from this policy and remove it if found.
- default: present
- choices: [ "present", "absent" ]
- type: str
-
-author: "Dan Kozlowski (@dkhenry)"
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
-
-EXAMPLES = r'''
-# Create a policy
-- name: Create IAM Managed Policy
- community.aws.iam_managed_policy:
- policy_name: "ManagedPolicy"
- policy_description: "A Helpful managed policy"
- policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
- state: present
-
-# Update a policy with a new default version
-- name: Update an IAM Managed Policy with new default version
- community.aws.iam_managed_policy:
- policy_name: "ManagedPolicy"
- policy: "{{ lookup('file', 'managed_policy_update.json') }}"
- state: present
-
-# Update a policy with a new non default version
-- name: Update an IAM Managed Policy with a non default version
- community.aws.iam_managed_policy:
- policy_name: "ManagedPolicy"
- policy:
- Version: "2012-10-17"
- Statement:
- - Effect: "Allow"
- Action: "logs:CreateLogGroup"
- Resource: "*"
- make_default: false
- state: present
-
-# Update a policy and make it the only version and the default version
-- name: Update an IAM Managed Policy with default version as the only version
- community.aws.iam_managed_policy:
- policy_name: "ManagedPolicy"
- policy: |
- {
- "Version": "2012-10-17",
- "Statement":[{
- "Effect": "Allow",
- "Action": "logs:PutRetentionPolicy",
- "Resource": "*"
- }]
- }
- only_version: true
- state: present
-
-# Remove a policy
-- name: Remove an existing IAM Managed Policy
- community.aws.iam_managed_policy:
- policy_name: "ManagedPolicy"
- state: absent
-'''
-
-RETURN = r'''
-policy:
- description: Returns the policy JSON structure. When I(state=absent) this returns the value of the removed policy.
- returned: success
- type: complex
- contains: {}
- sample: '{
- "arn": "arn:aws:iam::aws:policy/AdministratorAccess "
- "attachment_count": 0,
- "create_date": "2017-03-01T15:42:55.981000+00:00",
- "default_version_id": "v1",
- "is_attachable": true,
- "path": "/",
- "policy_id": "ANPA1245EXAMPLE54321",
- "policy_name": "AdministratorAccess",
- "update_date": "2017-03-01T15:42:55.981000+00:00"
- }'
-'''
-
-import json
-
-try:
- import botocore
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
-
-
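-# Note: ``module`` and ``client`` are module-level globals, assigned in main()
-# before any of the helpers below are called.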
-@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
-def list_policies_with_backoff():
- paginator = client.get_paginator('list_policies')
- return paginator.paginate(Scope='Local').build_full_result()
-
-
-def get_policy_by_name(name):
- try:
- response = list_policies_with_backoff()
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't list policies")
- for policy in response['Policies']:
- if policy['PolicyName'] == name:
- return policy
- return None
-
-
-def delete_oldest_non_default_version(policy):
- try:
- versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
- if not v['IsDefaultVersion']]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't list policy versions")
- versions.sort(key=lambda v: v['CreateDate'], reverse=True)
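- # versions is now ordered newest-first, so versions[-1:] is the single oldest non-default version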
- for v in versions[-1:]:
- try:
- client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete policy version")
-
-
-# Returns a tuple of (policy_version, changed)
-def get_or_create_policy_version(policy, policy_document):
- try:
- versions = client.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't list policy versions")
-
- for v in versions:
- try:
- document = client.get_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])['PolicyVersion']['Document']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v['VersionId']))
-
- if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))):
- return v, True
-
- # If the requested policy document matches this existing version
- if not compare_policies(document, json.loads(to_native(policy_document))):
- return v, False
-
- # No existing version so create one
- # There is a service limit (typically 5) of policy versions.
- #
- # Rather than assume that it is 5, we'll try to create the policy
- # and if that doesn't work, delete the oldest non default policy version
- # and try again.
- try:
- version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
- return version, True
- except is_boto3_error_code('LimitExceeded'):
- delete_oldest_non_default_version(policy)
- try:
- version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
- return version, True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e:
- module.fail_json_aws(second_e, msg="Couldn't create policy version")
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't create policy version")
-
-
-def set_if_default(policy, policy_version, is_default):
- if is_default and not policy_version['IsDefaultVersion']:
- try:
- client.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't set default policy version")
- return True
- return False
-
-
-def set_if_only(policy, policy_version, is_only):
- if is_only:
- try:
- versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
- if not v['IsDefaultVersion']]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't list policy versions")
- for v in versions:
- try:
- client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete policy version")
- return len(versions) > 0
- return False
-
-
-def detach_all_entities(policy, **kwargs):
- try:
- entities = client.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't detach list entities for policy {0}".format(policy['PolicyName']))
-
- for g in entities['PolicyGroups']:
- try:
- client.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g['GroupName']))
- for u in entities['PolicyUsers']:
- try:
- client.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u['UserName']))
- for r in entities['PolicyRoles']:
- try:
- client.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r['RoleName']))
- # Recurse to walk any remaining pages of attached entities
- if entities['IsTruncated']:
- detach_all_entities(policy, Marker=entities['Marker'])
-
-
-def create_or_update_policy(existing_policy):
- name = module.params.get('policy_name')
- description = module.params.get('policy_description')
- default = module.params.get('make_default')
- only = module.params.get('only_version')
-
- policy = None
-
- if module.params.get('policy') is not None:
- policy = json.dumps(json.loads(module.params.get('policy')))
-
- if existing_policy is None:
- if module.check_mode:
- module.exit_json(changed=True)
-
- # Create policy when none already exists
- try:
- rvalue = client.create_policy(PolicyName=name, Path='/', PolicyDocument=policy, Description=description)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name))
-
- module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy']))
- else:
- policy_version, changed = get_or_create_policy_version(existing_policy, policy)
- changed = set_if_default(existing_policy, policy_version, default) or changed
- changed = set_if_only(existing_policy, policy_version, only) or changed
-
- # If anything has changed we need to refresh the policy
- if changed:
- try:
- updated_policy = client.get_policy(PolicyArn=existing_policy['Arn'])['Policy']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Couldn't get policy")
-
- module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(updated_policy))
- else:
- module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(existing_policy))
-
-
-def delete_policy(existing_policy):
- # Check for existing policy
- if existing_policy:
- if module.check_mode:
- module.exit_json(changed=True)
-
- # Detach policy
- detach_all_entities(existing_policy)
- # Delete Versions
- try:
- versions = client.list_policy_versions(PolicyArn=existing_policy['Arn'])['Versions']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't list policy versions")
- for v in versions:
- if not v['IsDefaultVersion']:
- try:
- client.delete_policy_version(PolicyArn=existing_policy['Arn'], VersionId=v['VersionId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg="Couldn't delete policy version {0}".format(v['VersionId']))
- # Delete policy
- try:
- client.delete_policy(PolicyArn=existing_policy['Arn'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy['PolicyName']))
-
- # This is the one case where we will return the old policy
- module.exit_json(changed=True, policy=camel_dict_to_snake_dict(existing_policy))
- else:
- module.exit_json(changed=False, policy=None)
-
-
-def main():
- global module
- global client
-
- argument_spec = dict(
- policy_name=dict(required=True),
- policy_description=dict(default=''),
- policy=dict(type='json'),
- make_default=dict(type='bool', default=True),
- only_version=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent']),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- required_if=[['state', 'present', ['policy']]],
- supports_check_mode=True
- )
-
- name = module.params.get('policy_name')
- state = module.params.get('state')
-
- try:
- client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
-
- existing_policy = get_policy_by_name(name)
-
- if state == 'present':
- create_or_update_policy(existing_policy)
- else:
- delete_policy(existing_policy)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py b/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py
deleted file mode 100644
index 16abae170..000000000
--- a/ansible_collections/community/aws/plugins/modules/iam_mfa_device_info.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: iam_mfa_device_info
-version_added: 1.0.0
-short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
-description:
- - List the MFA (Multi-Factor Authentication) devices registered for a user.
-author: Victor Costan (@pwnall)
-options:
- user_name:
- description:
- - The name of the user whose MFA devices will be listed.
- type: str
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
-
-RETURN = """
-mfa_devices:
- description: The MFA devices registered for the given user.
- returned: always
- type: list
- sample:
- - enable_date: "2016-03-11T23:25:36+00:00"
- serial_number: arn:aws:iam::123456789012:mfa/example
- user_name: example
- - enable_date: "2016-03-11T23:25:37+00:00"
- serial_number: arn:aws:iam::123456789012:mfa/example
- user_name: example
-"""
-
-EXAMPLES = r'''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html
-- name: List MFA devices
- community.aws.iam_mfa_device_info:
- register: mfa_devices
-
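-# Illustrative variant (not from the original examples): restrict the listing
-# to a single user via the user_name option.
-- name: List MFA devices for one user
- community.aws.iam_mfa_device_info:
- user_name: example
- register: user_mfa_devices
-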
-# more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
-- name: Assume an existing role
- community.aws.sts_assume_role:
- mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
- role_arn: "arn:aws:iam::123456789012:role/someRole"
- role_session_name: "someRoleSession"
- register: assumed_role
-'''
-
-try:
- import botocore
- from botocore.exceptions import ClientError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-
-
-def list_mfa_devices(connection, module):
- user_name = module.params.get('user_name')
- changed = False
-
- args = {}
- if user_name is not None:
- args['UserName'] = user_name
- try:
- response = connection.list_mfa_devices(**args)
- except ClientError as e:
- module.fail_json_aws(e, msg="Failed to list MFA devices")
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-
-
-def main():
- argument_spec = dict(
- user_name=dict(required=False, default=None),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- try:
- connection = module.client('iam')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
-
- list_mfa_devices(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_password_policy.py b/ansible_collections/community/aws/plugins/modules/iam_password_policy.py
deleted file mode 100644
index 19614d26d..000000000
--- a/ansible_collections/community/aws/plugins/modules/iam_password_policy.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: iam_password_policy
-version_added: 1.0.0
-short_description: Update an IAM Password Policy
-description:
- - Updates the IAM password policy for a given AWS account.
-author:
- - "Aaron Smith (@slapula)"
-options:
- state:
- description:
- - Specifies the overall state of the password policy.
- required: true
- choices: ['present', 'absent']
- type: str
- min_pw_length:
- description:
- - Minimum password length.
- default: 6
- aliases: [minimum_password_length]
- type: int
- require_symbols:
- description:
- - Require symbols in password.
- default: false
- type: bool
- require_numbers:
- description:
- - Require numbers in password.
- default: false
- type: bool
- require_uppercase:
- description:
- - Require uppercase letters in password.
- default: false
- type: bool
- require_lowercase:
- description:
- - Require lowercase letters in password.
- default: false
- type: bool
- allow_pw_change:
- description:
- - Allow users to change their password.
- default: false
- type: bool
- aliases: [allow_password_change]
- pw_max_age:
- description:
- - Maximum age for a password in days. When this option is 0, passwords
- do not expire automatically.
- default: 0
- aliases: [password_max_age]
- type: int
- pw_reuse_prevent:
- description:
- - The number of previous passwords users are prevented from reusing. When 0, no re-use restriction is applied.
- default: 0
- aliases: [password_reuse_prevent, prevent_reuse]
- type: int
- pw_expire:
- description:
- - Prevents users from changing an expired password.
- default: false
- type: bool
- aliases: [password_expire, expire]
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
-
-EXAMPLES = '''
-- name: Password policy for AWS account
- community.aws.iam_password_policy:
- state: present
- min_pw_length: 8
- require_symbols: false
- require_numbers: true
- require_uppercase: true
- require_lowercase: true
- allow_pw_change: true
- pw_max_age: 60
- pw_reuse_prevent: 5
- pw_expire: false
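-
-# Illustrative addition (not from the original examples): remove the account
-# password policy entirely.
-- name: Remove the account password policy
- community.aws.iam_password_policy:
- state: absent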
-'''
-
-RETURN = ''' # '''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-
-
-class IAMConnection(object):
- def __init__(self, module):
- try:
- self.connection = module.resource('iam')
- self.module = module
- except Exception as e:
- module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
-
- def policy_to_dict(self, policy):
- policy_attributes = [
- 'allow_users_to_change_password', 'expire_passwords', 'hard_expiry',
- 'max_password_age', 'minimum_password_length', 'password_reuse_prevention',
- 'require_lowercase_characters', 'require_numbers', 'require_symbols', 'require_uppercase_characters'
- ]
- ret = {}
- for attr in policy_attributes:
- ret[attr] = getattr(policy, attr)
- return ret
-
- def update_password_policy(self, module, policy):
- min_pw_length = module.params.get('min_pw_length')
- require_symbols = module.params.get('require_symbols')
- require_numbers = module.params.get('require_numbers')
- require_uppercase = module.params.get('require_uppercase')
- require_lowercase = module.params.get('require_lowercase')
- allow_pw_change = module.params.get('allow_pw_change')
- pw_max_age = module.params.get('pw_max_age')
- pw_reuse_prevent = module.params.get('pw_reuse_prevent')
- pw_expire = module.params.get('pw_expire')
-
- update_parameters = dict(
- MinimumPasswordLength=min_pw_length,
- RequireSymbols=require_symbols,
- RequireNumbers=require_numbers,
- RequireUppercaseCharacters=require_uppercase,
- RequireLowercaseCharacters=require_lowercase,
- AllowUsersToChangePassword=allow_pw_change,
- HardExpiry=pw_expire
- )
- if pw_reuse_prevent:
- update_parameters.update(PasswordReusePrevention=pw_reuse_prevent)
- if pw_max_age:
- update_parameters.update(MaxPasswordAge=pw_max_age)
-
- try:
- original_policy = self.policy_to_dict(policy)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
- original_policy = {}
-
- try:
- results = policy.update(**update_parameters)
- policy.reload()
- updated_policy = self.policy_to_dict(policy)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy")
-
- changed = (original_policy != updated_policy)
- return (changed, updated_policy, camel_dict_to_snake_dict(results))
-
- def delete_password_policy(self, policy):
- try:
- results = policy.delete()
- except is_boto3_error_code('NoSuchEntity'):
- self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy")
- return camel_dict_to_snake_dict(results)
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={
- 'state': dict(choices=['present', 'absent'], required=True),
- 'min_pw_length': dict(type='int', aliases=['minimum_password_length'], default=6),
- 'require_symbols': dict(type='bool', default=False),
- 'require_numbers': dict(type='bool', default=False),
- 'require_uppercase': dict(type='bool', default=False),
- 'require_lowercase': dict(type='bool', default=False),
- 'allow_pw_change': dict(type='bool', aliases=['allow_password_change'], default=False),
- 'pw_max_age': dict(type='int', aliases=['password_max_age'], default=0),
- 'pw_reuse_prevent': dict(type='int', aliases=['password_reuse_prevent', 'prevent_reuse'], default=0),
- 'pw_expire': dict(type='bool', aliases=['password_expire', 'expire'], default=False),
- },
- supports_check_mode=True,
- )
-
- resource = IAMConnection(module)
- policy = resource.connection.AccountPasswordPolicy()
-
- state = module.params.get('state')
-
- if state == 'present':
- (changed, new_policy, update_result) = resource.update_password_policy(module, policy)
- module.exit_json(changed=changed, task_status={'IAM': update_result}, policy=new_policy)
-
- if state == 'absent':
- delete_result = resource.delete_password_policy(policy)
- module.exit_json(changed=True, task_status={'IAM': delete_result})
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_role.py b/ansible_collections/community/aws/plugins/modules/iam_role.py
deleted file mode 100644
index 4add6a525..000000000
--- a/ansible_collections/community/aws/plugins/modules/iam_role.py
+++ /dev/null
@@ -1,736 +0,0 @@
-#!/usr/bin/python
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: iam_role
-version_added: 1.0.0
-short_description: Manage AWS IAM roles
-description:
- - Manage AWS IAM roles.
-author:
- - "Rob White (@wimnat)"
-options:
- path:
- description:
- - The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
- default: "/"
- type: str
- name:
- description:
- - The name of the role to create.
- required: true
- type: str
- description:
- description:
- - Provides a description of the role.
- type: str
- boundary:
- description:
- - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
- - Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false).
- - This is intended for roles/users that have permissions to create new IAM objects.
- - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
- aliases: [boundary_policy_arn]
- type: str
- assume_role_policy_document:
- description:
- - The trust relationship policy document that grants an entity permission to assume the role.
- - This parameter is required when I(state=present).
- type: json
- managed_policies:
- description:
- - A list of managed policy ARNs or friendly names.
- - To remove all policies set I(purge_policies=true) and I(managed_policies=[None]).
- - To embed an inline policy, use M(community.aws.iam_policy).
- aliases: ['managed_policy']
- type: list
- elements: str
- max_session_duration:
- description:
- - The maximum duration (in seconds) of a session when assuming the role.
- - Valid values are between 1 and 12 hours (3600 and 43200 seconds).
- type: int
- purge_policies:
- description:
- - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
- type: bool
- aliases: ['purge_policy', 'purge_managed_policies']
- default: true
- state:
- description:
- - Create or remove the IAM role.
- default: present
- choices: [ present, absent ]
- type: str
- create_instance_profile:
- description:
- - Creates an IAM instance profile along with the role.
- default: true
- type: bool
- delete_instance_profile:
- description:
- - When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance
- profile created with the same I(name) as the role.
- - Only applies when I(state=absent).
- default: false
- type: bool
- wait_timeout:
- description:
- - How long (in seconds) to wait for creation / update to complete.
- default: 120
- type: int
- wait:
- description:
- - When I(wait=True) the module will wait for up to I(wait_timeout) seconds
- for IAM role creation before returning.
- default: True
- type: bool
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
- - amazon.aws.tags
-'''
-
-EXAMPLES = r'''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create a role with description and tags
- community.aws.iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- description: This is My New Role
- tags:
- env: dev
-
-- name: "Create a role and attach a managed policy called 'PowerUserAccess'"
- community.aws.iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- managed_policies:
- - arn:aws:iam::aws:policy/PowerUserAccess
-
-- name: Keep the role created above but remove all managed policies
- community.aws.iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- managed_policies: []
-
-- name: Delete the role
- community.aws.iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"
- state: absent
-
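-# Illustrative addition (not from the original examples; the boundary ARN is
-# hypothetical): a permissions boundary requires create_instance_profile: false.
-- name: Create a role with a permissions boundary
- community.aws.iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- boundary: arn:aws:iam::123456789012:policy/hypothetical-boundary
- create_instance_profile: false
-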
-'''
-RETURN = r'''
-iam_role:
- description: dictionary containing the IAM Role data
- returned: success
- type: complex
- contains:
- path:
- description: the path to the role
- type: str
- returned: always
- sample: /
- role_name:
- description: the friendly name that identifies the role
- type: str
- returned: always
- sample: myrole
- role_id:
- description: the stable and unique string identifying the role
- type: str
- returned: always
- sample: ABCDEFF4EZ4ABCDEFV4ZC
- arn:
- description: the Amazon Resource Name (ARN) specifying the role
- type: str
- returned: always
- sample: "arn:aws:iam::1234567890:role/mynewrole"
- create_date:
- description: the date and time, in ISO 8601 date-time format, when the role was created
- type: str
- returned: always
- sample: "2016-08-14T04:36:28+00:00"
- assume_role_policy_document:
- description:
- - the policy that grants an entity permission to assume the role
- - |
- note: the keys in this dictionary are currently converted from CamelCase to
- snake_case. In a release after 2023-12-01 this behaviour will change.
- type: dict
- returned: always
- sample: {
- 'statement': [
- {
- 'action': 'sts:AssumeRole',
- 'effect': 'Allow',
- 'principal': {
- 'service': 'ec2.amazonaws.com'
- },
- 'sid': ''
- }
- ],
- 'version': '2012-10-17'
- }
- assume_role_policy_document_raw:
- description: the policy that grants an entity permission to assume the role
- type: dict
- returned: always
- version_added: 5.3.0
- sample: {
- 'Statement': [
- {
- 'Action': 'sts:AssumeRole',
- 'Effect': 'Allow',
- 'Principal': {
- 'Service': 'ec2.amazonaws.com'
- },
- 'Sid': ''
- }
- ],
- 'Version': '2012-10-17'
- }
-
- attached_policies:
- description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
- type: list
- returned: always
- sample: [
- {
- 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
- 'policy_name': 'PowerUserAccess'
- }
- ]
- tags:
- description: role tags
- type: dict
- returned: always
- sample: '{"Env": "Prod"}'
-'''
-
-import json
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
-
-
-@AWSRetry.jittered_backoff()
-def _list_policies(client):
- paginator = client.get_paginator('list_policies')
- return paginator.paginate().build_full_result()['Policies']
-
-
-def wait_iam_exists(module, client):
- if module.check_mode:
- return
- if not module.params.get('wait'):
- return
-
- role_name = module.params.get('name')
- wait_timeout = module.params.get('wait_timeout')
-
- delay = min(wait_timeout, 5)
- max_attempts = wait_timeout // delay
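- # e.g. wait_timeout=120 gives delay=5 and max_attempts=24 (integer division)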
-
- try:
- waiter = client.get_waiter('role_exists')
- waiter.wait(
- WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts},
- RoleName=role_name,
- )
- except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, msg='Timeout while waiting on IAM role creation')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed while waiting on IAM role creation')
-
-
-def convert_friendly_names_to_arns(module, client, policy_names):
- if all(policy.startswith('arn:') for policy in policy_names):
- return policy_names
-
- allpolicies = {}
- policies = _list_policies(client)
-
- for policy in policies:
- allpolicies[policy['PolicyName']] = policy['Arn']
- allpolicies[policy['Arn']] = policy['Arn']
- try:
- return [allpolicies[policy] for policy in policy_names]
- except KeyError as e:
- module.fail_json_aws(e, msg="Couldn't find policy")
-
-
-def attach_policies(module, client, policies_to_attach, role_name):
- if module.check_mode and policies_to_attach:
- return True
-
- changed = False
- for policy_arn in policies_to_attach:
- try:
- client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn, aws_retry=True)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, role_name))
- return changed
-
-
-def remove_policies(module, client, policies_to_remove, role_name):
- if module.check_mode and policies_to_remove:
- return True
-
- changed = False
- for policy in policies_to_remove:
- try:
- client.detach_role_policy(RoleName=role_name, PolicyArn=policy, aws_retry=True)
- changed = True
- except is_boto3_error_code('NoSuchEntityException'):
- pass
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, role_name))
- return changed
-
-
-def remove_inline_policies(module, client, role_name):
- current_inline_policies = get_inline_policy_list(module, client, role_name)
- for policy in current_inline_policies:
- try:
- client.delete_role_policy(RoleName=role_name, PolicyName=policy, aws_retry=True)
- except is_boto3_error_code('NoSuchEntityException'):
- pass
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Unable to delete policy {0} embedded in {1}".format(policy, role_name))
-
-
-def generate_create_params(module):
- params = dict()
- params['Path'] = module.params.get('path')
- params['RoleName'] = module.params.get('name')
- params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document')
- if module.params.get('description') is not None:
- params['Description'] = module.params.get('description')
- if module.params.get('max_session_duration') is not None:
- params['MaxSessionDuration'] = module.params.get('max_session_duration')
- if module.params.get('boundary') is not None:
- params['PermissionsBoundary'] = module.params.get('boundary')
- if module.params.get('tags') is not None:
- params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags'))
-
- return params
-
-
-def create_basic_role(module, client):
- """
- Perform the Role creation.
- Assumes tests for the role existing have already been performed.
- """
- if module.check_mode:
- module.exit_json(changed=True)
-
- try:
- params = generate_create_params(module)
- role = client.create_role(aws_retry=True, **params)
- # 'Description' is documented as a key of the role returned by create_role,
- # but it is not actually returned; this appears to be an AWS bug (the AWS CLI shows the same behaviour).
- # Get the role after creating it.
- role = get_role_with_backoff(module, client, params['RoleName'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to create role")
-
- return role
-
-
-def update_role_assumed_policy(module, client, role_name, target_assumed_policy, current_assumed_policy):
- # Check Assumed Policy document
- if target_assumed_policy is None or not compare_policies(current_assumed_policy, json.loads(target_assumed_policy)):
- return False
-
- if module.check_mode:
- return True
-
- try:
- client.update_assume_role_policy(
- RoleName=role_name,
- PolicyDocument=target_assumed_policy,
- aws_retry=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(role_name))
- return True
-
-
-def update_role_description(module, client, role_name, target_description, current_description):
- # Check Description update
- if target_description is None or current_description == target_description:
- return False
-
- if module.check_mode:
- return True
-
- try:
- client.update_role(RoleName=role_name, Description=target_description, aws_retry=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to update description for role {0}".format(role_name))
- return True
-
-
-def update_role_max_session_duration(module, client, role_name, target_duration, current_duration):
- # Check MaxSessionDuration update
- if target_duration is None or current_duration == target_duration:
- return False
-
- if module.check_mode:
- return True
-
- try:
- client.update_role(RoleName=role_name, MaxSessionDuration=target_duration, aws_retry=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(role_name))
- return True
-
-
-def update_role_permissions_boundary(module, client, role_name, target_permissions_boundary, current_permissions_boundary):
- # Check PermissionsBoundary
- if target_permissions_boundary is None or target_permissions_boundary == current_permissions_boundary:
- return False
-
- if module.check_mode:
- return True
-
- if target_permissions_boundary == '':
- try:
- client.delete_role_permissions_boundary(RoleName=role_name, aws_retry=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(role_name))
- else:
- try:
- client.put_role_permissions_boundary(RoleName=role_name, PermissionsBoundary=target_permissions_boundary, aws_retry=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(role_name))
- return True
-
-
-def update_managed_policies(module, client, role_name, managed_policies, purge_policies):
- # Check Managed Policies
- if managed_policies is None:
- return False
-
- # Get list of current attached managed policies
- current_attached_policies = get_attached_policy_list(module, client, role_name)
- current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies]
-
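- # managed_policies=[None] is the documented sentinel for "detach everything"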
- if len(managed_policies) == 1 and managed_policies[0] is None:
- managed_policies = []
-
- policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies)
- policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list)
-
- changed = False
- if purge_policies and policies_to_remove:
- if module.check_mode:
- return True
- else:
- changed |= remove_policies(module, client, policies_to_remove, role_name)
-
- if policies_to_attach:
- if module.check_mode:
- return True
- else:
- changed |= attach_policies(module, client, policies_to_attach, role_name)
-
- return changed
-
-
-def create_or_update_role(module, client):
-
- role_name = module.params.get('name')
- assumed_policy = module.params.get('assume_role_policy_document')
- create_instance_profile = module.params.get('create_instance_profile')
- description = module.params.get('description')
- duration = module.params.get('max_session_duration')
- path = module.params.get('path')
- permissions_boundary = module.params.get('boundary')
- purge_tags = module.params.get('purge_tags')
- tags = ansible_dict_to_boto3_tag_list(module.params.get('tags')) if module.params.get('tags') else None
- purge_policies = module.params.get('purge_policies')
- managed_policies = module.params.get('managed_policies')
- if managed_policies:
- # Attempt to list the policies early so we don't leave things behind if we can't find them.
- managed_policies = convert_friendly_names_to_arns(module, client, managed_policies)
-
- changed = False
-
- # Get role
- role = get_role(module, client, role_name)
-
- # If role is None, create it
- if role is None:
- role = create_basic_role(module, client)
-
- if not module.check_mode and module.params.get('wait'):
- wait_iam_exists(module, client)
-
- changed = True
- else:
- # Role exists - get current attributes
- current_assumed_policy = role.get('AssumeRolePolicyDocument')
- current_description = role.get('Description')
- current_duration = role.get('MaxSessionDuration')
- current_permissions_boundary = role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', '')
-
- # Update attributes
- changed |= update_role_tags(module, client, role_name, tags, purge_tags)
- changed |= update_role_assumed_policy(module, client, role_name, assumed_policy, current_assumed_policy)
- changed |= update_role_description(module, client, role_name, description, current_description)
- changed |= update_role_max_session_duration(module, client, role_name, duration, current_duration)
- changed |= update_role_permissions_boundary(module, client, role_name, permissions_boundary, current_permissions_boundary)
-
- if not module.check_mode and module.params.get('wait'):
- wait_iam_exists(module, client)
-
- if create_instance_profile:
- changed |= create_instance_profiles(module, client, role_name, path)
-
- if not module.check_mode and module.params.get('wait'):
- wait_iam_exists(module, client)
-
- changed |= update_managed_policies(module, client, role_name, managed_policies, purge_policies)
- wait_iam_exists(module, client)
-
- # Get the role again
- role = get_role(module, client, role_name)
- role['AttachedPolicies'] = get_attached_policy_list(module, client, role_name)
- role['tags'] = get_role_tags(module, client)
-
- camel_role = camel_dict_to_snake_dict(role, ignore_list=['tags'])
- camel_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument", {})
- module.exit_json(changed=changed, iam_role=camel_role, **camel_role)
-
-
-def create_instance_profiles(module, client, role_name, path):
-
- # Fetch existing Profiles
- try:
- instance_profiles = client.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)['InstanceProfiles']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))
-
- # Profile already exists
- if any(p['InstanceProfileName'] == role_name for p in instance_profiles):
- return False
-
- if module.check_mode:
- return True
-
- # Make sure an instance profile is created
- try:
- client.create_instance_profile(InstanceProfileName=role_name, Path=path, aws_retry=True)
- except is_boto3_error_code('EntityAlreadyExists'):
- # If the profile already exists, no problem, move on.
- # Implies someone's changing things at the same time...
- return False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name))
-
- # And attach the role to the profile
- try:
- client.add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name, aws_retry=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(role_name))
-
- return True
-
-
-def remove_instance_profiles(module, client, role_name):
- delete_profiles = module.params.get("delete_instance_profile")
-
- try:
- instance_profiles = client.list_instance_profiles_for_role(aws_retry=True, RoleName=role_name)['InstanceProfiles']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))
-
- # Remove the role from the instance profile(s)
- for profile in instance_profiles:
- profile_name = profile['InstanceProfileName']
- try:
- if not module.check_mode:
- client.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, RoleName=role_name)
- if profile_name == role_name:
- if delete_profiles:
- try:
- client.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True)
- except is_boto3_error_code('NoSuchEntityException'):
- pass
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name))
-
-
-def destroy_role(module, client):
-
- role_name = module.params.get('name')
- role = get_role(module, client, role_name)
-
- if role is None:
- module.exit_json(changed=False)
-
- if not module.check_mode:
- # Before we try to delete the role we need to remove any
- # - attached instance profiles
- # - attached managed policies
- # - embedded inline policies
- remove_instance_profiles(module, client, role_name)
- update_managed_policies(module, client, role_name, [], True)
- remove_inline_policies(module, client, role_name)
- try:
- client.delete_role(aws_retry=True, RoleName=role_name)
- except is_boto3_error_code('NoSuchEntityException'):
- module.exit_json(changed=False)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Unable to delete role")
-
- module.exit_json(changed=True)
-
-
-def get_role_with_backoff(module, client, name):
- try:
- return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(client.get_role)(RoleName=name)['Role']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
-
-
-def get_role(module, client, name):
- try:
- return client.get_role(RoleName=name, aws_retry=True)['Role']
- except is_boto3_error_code('NoSuchEntity'):
- return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
-
-
-def get_attached_policy_list(module, client, name):
- try:
- return client.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
-
-
-def get_inline_policy_list(module, client, name):
- try:
- return client.list_role_policies(RoleName=name, aws_retry=True)['PolicyNames']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
-
-
-def get_role_tags(module, client):
- role_name = module.params.get('name')
- try:
- return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name))
-
-
-def update_role_tags(module, client, role_name, new_tags, purge_tags):
- if new_tags is None:
- return False
- new_tags = boto3_tag_list_to_ansible_dict(new_tags)
-
- try:
- existing_tags = boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError):
- existing_tags = {}
-
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
-
- if not module.check_mode:
- try:
- if tags_to_remove:
- client.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True)
- if tags_to_add:
- client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name)
-
- changed = bool(tags_to_add) or bool(tags_to_remove)
- return changed
-
-
-def main():
-
- argument_spec = dict(
- name=dict(type='str', required=True),
- path=dict(type='str', default="/"),
- assume_role_policy_document=dict(type='json'),
- managed_policies=dict(type='list', aliases=['managed_policy'], elements='str'),
- max_session_duration=dict(type='int'),
- state=dict(type='str', choices=['present', 'absent'], default='present'),
- description=dict(type='str'),
- boundary=dict(type='str', aliases=['boundary_policy_arn']),
- create_instance_profile=dict(type='bool', default=True),
- delete_instance_profile=dict(type='bool', default=False),
- purge_policies=dict(default=True, type='bool', aliases=['purge_policy', 'purge_managed_policies']),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(default=120, type='int'),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[('state', 'present', ['assume_role_policy_document'])],
- supports_check_mode=True)
-
- module.deprecate("All return values other than iam_role and changed have been deprecated and "
- "will be removed in a release after 2023-12-01.",
- date="2023-12-01", collection_name="community.aws")
-
- module.deprecate("In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document "
- "will no longer be converted from CamelCase to snake_case. The "
- "iam_role.assume_role_policy_document_raw return value already returns the "
- "policy document in this future format.",
- date="2023-12-01", collection_name="community.aws")
-
- if module.params.get('boundary'):
- if module.params.get('create_instance_profile'):
- module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
- if not module.params.get('boundary').startswith('arn:aws:iam'):
- module.fail_json(msg="Boundary policy must be an ARN")
- if module.params.get('max_session_duration'):
- max_session_duration = module.params.get('max_session_duration')
- if max_session_duration < 3600 or max_session_duration > 43200:
- module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
- if module.params.get('path'):
- path = module.params.get('path')
- if not path.endswith('/') or not path.startswith('/'):
- module.fail_json(msg="path must begin and end with /")
-
- client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
-
- state = module.params.get("state")
-
- if state == 'present':
- create_or_update_role(module, client)
- elif state == 'absent':
- destroy_role(module, client)
-
-
-if __name__ == '__main__':
- main()
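The guard clauses in main() encode three IAM constraints before any API call is made: permissions boundaries must be policy ARNs, max_session_duration must fall between 3600 and 43200 seconds, and paths must be '/'-delimited at both ends. The same checks condensed into a standalone sketch (names illustrative; like the module, it assumes the standard aws partition):

    def validate_role_params(boundary=None, max_session_duration=None, path=None):
        errors = []
        if boundary and not boundary.startswith("arn:aws:iam"):
            errors.append("boundary policy must be an ARN")
        if max_session_duration is not None and not 3600 <= max_session_duration <= 43200:
            errors.append("max_session_duration must be 3600-43200 seconds (1-12 hours)")
        if path and not (path.startswith("/") and path.endswith("/")):
            errors.append("path must begin and end with /")
        return errors

    # validate_role_params(boundary="not-an-arn", max_session_duration=99999) -> two errors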
diff --git a/ansible_collections/community/aws/plugins/modules/iam_role_info.py b/ansible_collections/community/aws/plugins/modules/iam_role_info.py
deleted file mode 100644
index d66be487a..000000000
--- a/ansible_collections/community/aws/plugins/modules/iam_role_info.py
+++ /dev/null
@@ -1,282 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: iam_role_info
-version_added: 1.0.0
-short_description: Gather information on IAM roles
-description:
- - Gathers information about IAM roles.
-author:
- - "Will Thames (@willthames)"
-options:
- name:
- description:
- - Name of a role to search for.
- - Mutually exclusive with I(path_prefix).
- aliases:
- - role_name
- type: str
- path_prefix:
- description:
- - Prefix of role to restrict IAM role search for.
- - Mutually exclusive with I(name).
- type: str
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
-
-EXAMPLES = '''
-- name: find all existing IAM roles
- community.aws.iam_role_info:
- register: result
-
-- name: describe a single role
- community.aws.iam_role_info:
- name: MyIAMRole
-
-- name: describe all roles matching a path prefix
- community.aws.iam_role_info:
- path_prefix: /application/path
-'''
-
-RETURN = '''
-iam_roles:
- description: List of IAM roles
- returned: always
- type: complex
- contains:
- arn:
- description: Amazon Resource Name for IAM role.
- returned: always
- type: str
- sample: arn:aws:iam::123456789012:role/AnsibleTestRole
- assume_role_policy_document:
- description:
- - The policy that grants an entity permission to assume the role
- - |
-        Note: the keys in this dictionary are currently converted from CamelCase to
- snake_case. In a release after 2023-12-01 this behaviour will change.
- returned: always
- type: dict
- assume_role_policy_document_raw:
- description: The policy document describing what can assume the role.
- returned: always
- type: dict
- version_added: 5.3.0
- create_date:
- description: Date IAM role was created.
- returned: always
- type: str
- sample: '2017-10-23T00:05:08+00:00'
- inline_policies:
- description: List of names of inline policies.
- returned: always
- type: list
- sample: []
- managed_policies:
- description: List of attached managed policies.
- returned: always
- type: complex
- contains:
- policy_arn:
- description: Amazon Resource Name for the policy.
- returned: always
- type: str
- sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy
- policy_name:
- description: Name of managed policy.
- returned: always
- type: str
- sample: AnsibleTestEC2Policy
- instance_profiles:
- description: List of attached instance profiles.
- returned: always
- type: complex
- contains:
- arn:
- description: Amazon Resource Name for the instance profile.
- returned: always
- type: str
- sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy
- create_date:
- description: Date instance profile was created.
- returned: always
- type: str
- sample: '2017-10-23T00:05:08+00:00'
- instance_profile_id:
- description: Amazon Identifier for the instance profile.
- returned: always
- type: str
- sample: AROAII7ABCD123456EFGH
- instance_profile_name:
- description: Name of instance profile.
- returned: always
- type: str
- sample: AnsibleTestEC2Policy
- path:
- description: Path of instance profile.
- returned: always
- type: str
- sample: /
- roles:
- description: List of roles associated with this instance profile.
- returned: always
- type: list
- sample: []
- path:
- description: Path of role.
- returned: always
- type: str
- sample: /
- role_id:
- description: Amazon Identifier for the role.
- returned: always
- type: str
- sample: AROAII7ABCD123456EFGH
- role_name:
- description: Name of the role.
- returned: always
- type: str
- sample: AnsibleTestRole
- tags:
- description: Role tags.
- type: dict
- returned: always
- sample: '{"Env": "Prod"}'
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-
-
-@AWSRetry.jittered_backoff()
-def list_iam_roles_with_backoff(client, **kwargs):
- paginator = client.get_paginator('list_roles')
- return paginator.paginate(**kwargs).build_full_result()
-
-
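Each listing helper below wraps a boto3 paginator in the collection's jittered-backoff decorator, so throttled pages are retried transparently rather than failing the whole listing. A rough standalone equivalent without the Ansible utilities (retry parameters illustrative):

    import random
    import time

    import botocore.exceptions

    def list_roles_with_retry(iam, retries=5, **kwargs):
        for attempt in range(retries):
            try:
                return iam.get_paginator("list_roles").paginate(**kwargs).build_full_result()
            except botocore.exceptions.ClientError as e:
                if e.response["Error"]["Code"] != "Throttling" or attempt == retries - 1:
                    raise
                time.sleep(random.uniform(0, 2 ** attempt))  # jittered exponential backoff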
-@AWSRetry.jittered_backoff()
-def list_iam_role_policies_with_backoff(client, role_name):
- paginator = client.get_paginator('list_role_policies')
- return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames']
-
-
-@AWSRetry.jittered_backoff()
-def list_iam_attached_role_policies_with_backoff(client, role_name):
- paginator = client.get_paginator('list_attached_role_policies')
- return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies']
-
-
-@AWSRetry.jittered_backoff()
-def list_iam_instance_profiles_for_role_with_backoff(client, role_name):
- paginator = client.get_paginator('list_instance_profiles_for_role')
- return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles']
-
-
-def describe_iam_role(module, client, role):
- name = role['RoleName']
- try:
- role['InlinePolicies'] = list_iam_role_policies_with_backoff(client, name)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name)
- try:
- role['ManagedPolicies'] = list_iam_attached_role_policies_with_backoff(client, name)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name)
- try:
- role['InstanceProfiles'] = list_iam_instance_profiles_for_role_with_backoff(client, name)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name)
- try:
- role['tags'] = boto3_tag_list_to_ansible_dict(role['Tags'])
- del role['Tags']
- except KeyError:
- role['tags'] = {}
- return role
-
-
-def describe_iam_roles(module, client):
- name = module.params['name']
- path_prefix = module.params['path_prefix']
- if name:
- try:
- roles = [client.get_role(RoleName=name, aws_retry=True)['Role']]
- except is_boto3_error_code('NoSuchEntity'):
- return []
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
- else:
- params = dict()
- if path_prefix:
- if not path_prefix.startswith('/'):
- path_prefix = '/' + path_prefix
- if not path_prefix.endswith('/'):
- path_prefix = path_prefix + '/'
- params['PathPrefix'] = path_prefix
- try:
- roles = list_iam_roles_with_backoff(client, **params)['Roles']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't list IAM roles")
- return [normalize_role(describe_iam_role(module, client, role)) for role in roles]
-
-
-def normalize_profile(profile):
- new_profile = camel_dict_to_snake_dict(profile)
- if profile.get("Roles"):
-        new_profile["roles"] = [normalize_role(role) for role in profile.get("Roles")]
- return new_profile
-
-
-def normalize_role(role):
- new_role = camel_dict_to_snake_dict(role, ignore_list=['tags'])
- new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument")
- if role.get("InstanceProfiles"):
-        new_role["instance_profiles"] = [normalize_profile(profile) for profile in role.get("InstanceProfiles")]
- return new_role
-
-
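normalize_role is what the deprecation notices are about: camel_dict_to_snake_dict rewrites every key, including the keys inside the assume-role policy document itself, so the untouched document is preserved separately as assume_role_policy_document_raw. A small illustration (assuming ansible-core is importable):

    from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

    policy = {"Statement": [{"Effect": "Allow", "Action": "sts:AssumeRole"}]}
    print(camel_dict_to_snake_dict({"AssumeRolePolicyDocument": policy}))
    # {'assume_role_policy_document': {'statement': [{'effect': 'Allow',
    #   'action': 'sts:AssumeRole'}]}} -- the policy's own keys are mangled too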
-def main():
- """
- Module action handler
- """
- argument_spec = dict(
- name=dict(aliases=['role_name']),
- path_prefix=dict(),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[['name', 'path_prefix']])
-
- client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
-
- module.deprecate("In a release after 2023-12-01 the contents of assume_role_policy_document "
- "will no longer be converted from CamelCase to snake_case. The "
- ".assume_role_policy_document_raw return value already returns the "
- "policy document in this future format.",
- date="2023-12-01", collection_name="community.aws")
-
- module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py b/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py
index f79e4c2c6..acaaa38fc 100644
--- a/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py
+++ b/ansible_collections/community/aws/plugins/modules/iam_saml_federation.py
@@ -1,25 +1,10 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
---
module: iam_saml_federation
version_added: 1.0.0
@@ -42,17 +27,18 @@ options:
default: present
choices: [ "present", "absent" ]
type: str
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
author:
- Tony (@axc450)
- Aidan Rowe (@aidan-)
-'''
-EXAMPLES = '''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# It is assumed that their matching environment variables are set.
# Creates a new iam saml identity provider if not present
@@ -74,9 +60,9 @@ EXAMPLES = '''
community.aws.iam_saml_federation:
name: example3
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
saml_provider:
description: Details of the SAML Identity Provider that was created/modified.
type: complex
@@ -101,15 +87,16 @@ saml_provider:
type: str
returned: present
sample: "2017-02-08T04:36:28+00:00"
-'''
+"""
try:
- import botocore.exceptions
+ import botocore
except ImportError:
pass
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class SAMLProviderManager:
@@ -119,7 +106,7 @@ class SAMLProviderManager:
self.module = module
try:
- self.conn = module.client('iam')
+ self.conn = module.client("iam")
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Unknown AWS SDK error")
@@ -146,10 +133,10 @@ class SAMLProviderManager:
def _get_provider_arn(self, name):
providers = self._list_saml_providers()
- for p in providers['SAMLProviderList']:
- provider_name = p['Arn'].split('/', 1)[1]
+ for p in providers["SAMLProviderList"]:
+ provider_name = p["Arn"].split("/", 1)[1]
if name == provider_name:
- return p['Arn']
+ return p["Arn"]
return None
@@ -157,55 +144,55 @@ class SAMLProviderManager:
if not metadata:
self.module.fail_json(msg="saml_metadata_document must be defined for present state")
- res = {'changed': False}
+ res = {"changed": False}
try:
arn = self._get_provider_arn(name)
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name))
+ self.module.fail_json_aws(e, msg=f"Could not get the ARN of the identity provider '{name}'")
if arn: # see if metadata needs updating
try:
resp = self._get_saml_provider(arn)
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not retrieve the identity provider '{0}'".format(name))
+ self.module.fail_json_aws(e, msg=f"Could not retrieve the identity provider '{name}'")
- if metadata.strip() != resp['SAMLMetadataDocument'].strip():
+ if metadata.strip() != resp["SAMLMetadataDocument"].strip():
# provider needs updating
- res['changed'] = True
+ res["changed"] = True
if not self.module.check_mode:
try:
resp = self._update_saml_provider(arn, metadata)
- res['saml_provider'] = self._build_res(resp['SAMLProviderArn'])
+ res["saml_provider"] = self._build_res(resp["SAMLProviderArn"])
except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Could not update the identity provider '{0}'".format(name))
+ self.module.fail_json_aws(e, msg=f"Could not update the identity provider '{name}'")
else:
- res['saml_provider'] = self._build_res(arn)
+ res["saml_provider"] = self._build_res(arn)
else: # create
- res['changed'] = True
+ res["changed"] = True
if not self.module.check_mode:
try:
resp = self._create_saml_provider(metadata, name)
- res['saml_provider'] = self._build_res(resp['SAMLProviderArn'])
+ res["saml_provider"] = self._build_res(resp["SAMLProviderArn"])
except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Could not create the identity provider '{0}'".format(name))
+ self.module.fail_json_aws(e, msg=f"Could not create the identity provider '{name}'")
self.module.exit_json(**res)
def delete_saml_provider(self, name):
- res = {'changed': False}
+ res = {"changed": False}
try:
arn = self._get_provider_arn(name)
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name))
+ self.module.fail_json_aws(e, msg=f"Could not get the ARN of the identity provider '{name}'")
if arn: # delete
- res['changed'] = True
+ res["changed"] = True
if not self.module.check_mode:
try:
self._delete_saml_provider(arn)
except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Could not delete the identity provider '{0}'".format(name))
+ self.module.fail_json_aws(e, msg=f"Could not delete the identity provider '{name}'")
self.module.exit_json(**res)
@@ -215,7 +202,7 @@ class SAMLProviderManager:
"arn": arn,
"metadata_document": saml_provider["SAMLMetadataDocument"],
"create_date": saml_provider["CreateDate"].isoformat(),
- "expire_date": saml_provider["ValidUntil"].isoformat()
+ "expire_date": saml_provider["ValidUntil"].isoformat(),
}
@@ -223,26 +210,26 @@ def main():
argument_spec = dict(
name=dict(required=True),
saml_metadata_document=dict(default=None, required=False),
- state=dict(default='present', required=False, choices=['present', 'absent']),
+ state=dict(default="present", required=False, choices=["present", "absent"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
- required_if=[('state', 'present', ['saml_metadata_document'])]
+ required_if=[("state", "present", ["saml_metadata_document"])],
)
- name = module.params['name']
- state = module.params.get('state')
- saml_metadata_document = module.params.get('saml_metadata_document')
+ name = module.params["name"]
+ state = module.params.get("state")
+ saml_metadata_document = module.params.get("saml_metadata_document")
sp_man = SAMLProviderManager(module)
- if state == 'present':
+ if state == "present":
sp_man.create_or_update_saml_provider(name, saml_metadata_document)
- elif state == 'absent':
+ elif state == "absent":
sp_man.delete_saml_provider(name)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
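The manager above is idempotent by construction: IAM offers no get-provider-by-name call, so the ARN is resolved from ListSAMLProviders, and the stored metadata document is compared before choosing between update, create, or no-op. The same flow in plain boto3 (a sketch; helper name illustrative):

    import boto3

    def ensure_saml_provider(iam, name, metadata):
        arn = next(
            (p["Arn"] for p in iam.list_saml_providers()["SAMLProviderList"]
             if p["Arn"].split("/", 1)[1] == name),
            None,
        )
        if arn is None:
            resp = iam.create_saml_provider(SAMLMetadataDocument=metadata, Name=name)
            return resp["SAMLProviderArn"], True
        current = iam.get_saml_provider(SAMLProviderArn=arn)["SAMLMetadataDocument"]
        if current.strip() != metadata.strip():
            iam.update_saml_provider(SAMLProviderArn=arn, SAMLMetadataDocument=metadata)
            return arn, True
        return arn, False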
diff --git a/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py b/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py
index f3d5c5808..6a7734aca 100644
--- a/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py
+++ b/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py
@@ -1,24 +1,10 @@
#!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
---
module: iam_server_certificate
version_added: 1.0.0
@@ -76,12 +62,14 @@ options:
author:
- Jonathan I. Davila (@defionscode)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
+
+RETURN = r""" # """
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Basic server certificate upload from local file
community.aws.iam_server_certificate:
name: very_ssl
@@ -104,7 +92,7 @@ EXAMPLES = '''
name: very_ssl
new_name: new_very_ssl
state: present
-'''
+"""
try:
import botocore
@@ -113,29 +101,30 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
@AWSRetry.jittered_backoff()
def _list_server_certficates():
- paginator = client.get_paginator('list_server_certificates')
- return paginator.paginate().build_full_result()['ServerCertificateMetadataList']
+ paginator = client.get_paginator("list_server_certificates")
+ return paginator.paginate().build_full_result()["ServerCertificateMetadataList"]
def check_duplicate_cert(new_cert):
- orig_cert_names = list(c['ServerCertificateName'] for c in _list_server_certficates())
+ orig_cert_names = list(c["ServerCertificateName"] for c in _list_server_certficates())
for cert_name in orig_cert_names:
cert = get_server_certificate(cert_name)
if not cert:
continue
- cert_body = cert.get('certificate_body', None)
+ cert_body = cert.get("certificate_body", None)
if not _compare_cert(new_cert, cert_body):
continue
module.fail_json(
changed=False,
- msg='This certificate already exists under the name {0} and dup_ok=False'.format(cert_name),
+ msg=f"This certificate already exists under the name {cert_name} and dup_ok=False",
duplicate_cert=cert,
)
@@ -148,25 +137,25 @@ def _compare_cert(cert_a, cert_b):
# Trim out the whitespace before comparing the certs. While this could mean
# an invalid cert 'matches' a valid cert, that's better than some stray
# whitespace breaking things
- cert_a.replace('\r', '')
- cert_a.replace('\n', '')
- cert_a.replace(' ', '')
- cert_b.replace('\r', '')
- cert_b.replace('\n', '')
- cert_b.replace(' ', '')
+    cert_a = cert_a.replace("\r", "")
+    cert_a = cert_a.replace("\n", "")
+    cert_a = cert_a.replace(" ", "")
+    cert_b = cert_b.replace("\r", "")
+    cert_b = cert_b.replace("\n", "")
+    cert_b = cert_b.replace(" ", "")
return cert_a == cert_b
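PEM payloads get re-wrapped in transit, so the comparison must ignore line endings and spaces; note that str.replace returns a new string, which is why the results have to be reassigned (discarding them, as the removed lines did, made the trimming a no-op). The same check as a compact standalone helper:

    def certs_equal(cert_a, cert_b):
        """Compare two PEM bodies, ignoring CR, LF and spaces."""
        def _strip(s):
            return s.replace("\r", "").replace("\n", "").replace(" ", "")
        return _strip(cert_a) == _strip(cert_b)

    # certs_equal("AA BB\nCC", "AABBCC") -> True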
def update_server_certificate(current_cert):
changed = False
- cert = module.params.get('cert')
- cert_chain = module.params.get('cert_chain')
+ cert = module.params.get("cert")
+ cert_chain = module.params.get("cert_chain")
- if not _compare_cert(cert, current_cert.get('certificate_body', None)):
- module.fail_json(msg='Modifying the certificate body is not supported by AWS')
- if not _compare_cert(cert_chain, current_cert.get('certificate_chain', None)):
- module.fail_json(msg='Modifying the chaining certificate is not supported by AWS')
+ if not _compare_cert(cert, current_cert.get("certificate_body", None)):
+ module.fail_json(msg="Modifying the certificate body is not supported by AWS")
+ if not _compare_cert(cert_chain, current_cert.get("certificate_chain", None)):
+ module.fail_json(msg="Modifying the chaining certificate is not supported by AWS")
# We can't compare keys.
if module.check_mode:
@@ -179,15 +168,15 @@ def update_server_certificate(current_cert):
def create_server_certificate():
- cert = module.params.get('cert')
- key = module.params.get('key')
- cert_chain = module.params.get('cert_chain')
+ cert = module.params.get("cert")
+ key = module.params.get("key")
+ cert_chain = module.params.get("cert_chain")
- if not module.params.get('dup_ok'):
+ if not module.params.get("dup_ok"):
check_duplicate_cert(cert)
- path = module.params.get('path')
- name = module.params.get('name')
+ path = module.params.get("path")
+ name = module.params.get("name")
params = dict(
ServerCertificateName=name,
@@ -196,28 +185,25 @@ def create_server_certificate():
)
if cert_chain:
- params['CertificateChain'] = cert_chain
+ params["CertificateChain"] = cert_chain
if path:
- params['Path'] = path
+ params["Path"] = path
if module.check_mode:
return True
try:
- client.upload_server_certificate(
- aws_retry=True,
- **params
- )
+ client.upload_server_certificate(aws_retry=True, **params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name))
+        module.fail_json_aws(e, msg=f"Failed to upload server certificate {name}")
return True
def rename_server_certificate(current_cert):
- name = module.params.get('name')
- new_name = module.params.get('new_name')
- new_path = module.params.get('new_path')
+ name = module.params.get("name")
+ new_name = module.params.get("new_name")
+ new_path = module.params.get("new_path")
changes = dict()
@@ -226,16 +212,16 @@ def rename_server_certificate(current_cert):
current_cert = get_server_certificate(new_name)
else:
if new_name:
- changes['NewServerCertificateName'] = new_name
+ changes["NewServerCertificateName"] = new_name
-        cert_metadata = current_cert.get('server_certificate_metadata', {})
-
         if not current_cert:
-            module.fail_json(msg='Unable to find certificate {0}'.format(name))
+            module.fail_json(msg=f"Unable to find certificate {name}")
+
+        # Guard against a missing certificate before dereferencing it.
+        cert_metadata = current_cert.get("server_certificate_metadata", {})
+ current_path = cert_metadata.get("path", None)
if new_path and current_path != new_path:
- changes['NewPath'] = new_path
+ changes["NewPath"] = new_path
if not changes:
return False
@@ -244,14 +230,9 @@ def rename_server_certificate(current_cert):
return True
try:
- client.update_server_certificate(
- aws_retry=True,
- ServerCertificateName=name,
- **changes
- )
+ client.update_server_certificate(aws_retry=True, ServerCertificateName=name, **changes)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to update server certificate {0}'.format(name),
- changes=changes)
+ module.fail_json_aws(e, msg=f"Failed to update server certificate {name}", changes=changes)
return True
@@ -263,17 +244,20 @@ def delete_server_certificate(current_cert):
if module.check_mode:
return True
- name = module.params.get('name')
+ name = module.params.get("name")
try:
result = client.delete_server_certificate(
aws_retry=True,
ServerCertificateName=name,
)
- except is_boto3_error_code('NoSuchEntity'):
+ except is_boto3_error_code("NoSuchEntity"):
return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed to delete server certificate {0}'.format(name))
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Failed to delete server certificate {name}")
return True
@@ -286,11 +270,14 @@ def get_server_certificate(name):
aws_retry=True,
ServerCertificateName=name,
)
- except is_boto3_error_code('NoSuchEntity'):
+ except is_boto3_error_code("NoSuchEntity"):
return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed to get server certificate {0}'.format(name))
- cert = dict(camel_dict_to_snake_dict(result.get('ServerCertificate')))
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Failed to get server certificate {name}")
+ cert = dict(camel_dict_to_snake_dict(result.get("ServerCertificate")))
return cert
@@ -300,75 +287,74 @@ def compatability_results(current_cert):
if not current_cert:
return compat_results
- metadata = current_cert.get('server_certificate_metadata', {})
-
- if current_cert.get('certificate_body', None):
- compat_results['cert_body'] = current_cert.get('certificate_body')
- if current_cert.get('certificate_chain', None):
- compat_results['chain_cert_body'] = current_cert.get('certificate_chain')
- if metadata.get('arn', None):
- compat_results['arn'] = metadata.get('arn')
- if metadata.get('expiration', None):
- compat_results['expiration_date'] = metadata.get('expiration')
- if metadata.get('path', None):
- compat_results['cert_path'] = metadata.get('path')
- if metadata.get('server_certificate_name', None):
- compat_results['name'] = metadata.get('server_certificate_name')
- if metadata.get('upload_date', None):
- compat_results['upload_date'] = metadata.get('upload_date')
+ metadata = current_cert.get("server_certificate_metadata", {})
+
+ if current_cert.get("certificate_body", None):
+ compat_results["cert_body"] = current_cert.get("certificate_body")
+ if current_cert.get("certificate_chain", None):
+ compat_results["chain_cert_body"] = current_cert.get("certificate_chain")
+ if metadata.get("arn", None):
+ compat_results["arn"] = metadata.get("arn")
+ if metadata.get("expiration", None):
+ compat_results["expiration_date"] = metadata.get("expiration")
+ if metadata.get("path", None):
+ compat_results["cert_path"] = metadata.get("path")
+ if metadata.get("server_certificate_name", None):
+ compat_results["name"] = metadata.get("server_certificate_name")
+ if metadata.get("upload_date", None):
+ compat_results["upload_date"] = metadata.get("upload_date")
return compat_results
def main():
-
global module
global client
argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent']),
+ state=dict(required=True, choices=["present", "absent"]),
name=dict(required=True),
cert=dict(),
key=dict(no_log=True),
cert_chain=dict(),
new_name=dict(),
- path=dict(default='/'),
+ path=dict(default="/"),
new_path=dict(),
- dup_ok=dict(type='bool', default=True),
+ dup_ok=dict(type="bool", default=True),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
mutually_exclusive=[
- ['new_path', 'key'],
- ['new_path', 'cert'],
- ['new_path', 'cert_chain'],
- ['new_name', 'key'],
- ['new_name', 'cert'],
- ['new_name', 'cert_chain'],
+ ["new_path", "key"],
+ ["new_path", "cert"],
+ ["new_path", "cert_chain"],
+ ["new_name", "key"],
+ ["new_name", "cert"],
+ ["new_name", "cert_chain"],
],
supports_check_mode=True,
)
- client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff())
- state = module.params.get('state')
- name = module.params.get('name')
- path = module.params.get('path')
- new_name = module.params.get('new_name')
- new_path = module.params.get('new_path')
- dup_ok = module.params.get('dup_ok')
+ state = module.params.get("state")
+ name = module.params.get("name")
+ path = module.params.get("path")
+ new_name = module.params.get("new_name")
+ new_path = module.params.get("new_path")
+ dup_ok = module.params.get("dup_ok")
current_cert = get_server_certificate(name)
results = dict()
- if state == 'absent':
+ if state == "absent":
changed = delete_server_certificate(current_cert)
if changed:
- results['deleted_cert'] = name
+ results["deleted_cert"] = name
else:
- msg = 'Certificate with the name {0} already absent'.format(name)
- results['msg'] = msg
+ msg = f"Certificate with the name {name} already absent"
+ results["msg"] = msg
else:
if new_name or new_path:
changed = rename_server_certificate(current_cert)
@@ -382,16 +368,13 @@ def main():
changed = create_server_certificate()
updated_cert = get_server_certificate(name)
- results['server_certificate'] = updated_cert
+ results["server_certificate"] = updated_cert
compat_results = compatability_results(updated_cert)
if compat_results:
results.update(compat_results)
- module.exit_json(
- changed=changed,
- **results
- )
+ module.exit_json(changed=changed, **results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
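Renames and path moves both ride on the single UpdateServerCertificate call, which is also why new_name and new_path are mutually exclusive with uploading fresh material in the argument spec. A bare-bones sketch of that call (helper name illustrative):

    import boto3

    def move_server_certificate(iam, name, new_name=None, new_path=None):
        changes = {}
        if new_name:
            changes["NewServerCertificateName"] = new_name
        if new_path:
            changes["NewPath"] = new_path
        if changes:
            iam.update_server_certificate(ServerCertificateName=name, **changes)
        return bool(changes)

    # move_server_certificate(boto3.client("iam"), "very_ssl", new_name="new_very_ssl")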
diff --git a/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py b/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py
index ee0dc590d..5504cb746 100644
--- a/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py
+++ b/ansible_collections/community/aws/plugins/modules/iam_server_certificate_info.py
@@ -1,32 +1,30 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: iam_server_certificate_info
version_added: 1.0.0
 short_description: Retrieve information about a server certificate
description:
- Retrieve the attributes of a server certificate.
-author: "Allen Sanabria (@linuxdynasty)"
+author:
+ - "Allen Sanabria (@linuxdynasty)"
options:
name:
description:
- The name of the server certificate you are retrieving attributes for.
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Retrieve server certificate
community.aws.iam_server_certificate_info:
name: production-cert
@@ -37,9 +35,9 @@ EXAMPLES = '''
name: production-cert
register: server_cert
failed_when: "{{ server_cert.results | length == 0 }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
server_certificate_id:
description: The 21 character certificate id
returned: success
@@ -75,16 +73,15 @@ upload_date:
returned: success
type: str
sample: "2015-04-25T00:36:40+00:00"
-'''
+"""
try:
import botocore
- import botocore.exceptions
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_server_certs(iam, name=None):
@@ -113,22 +110,24 @@ def get_server_certs(iam, name=None):
results = dict()
try:
if name:
- server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
+ server_certs = [iam.get_server_certificate(ServerCertificateName=name)["ServerCertificate"]]
else:
- server_certs = iam.list_server_certificates()['ServerCertificateMetadataList']
+ server_certs = iam.list_server_certificates()["ServerCertificateMetadataList"]
for server_cert in server_certs:
if not name:
- server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate']
- cert_md = server_cert['ServerCertificateMetadata']
- results[cert_md['ServerCertificateName']] = {
- 'certificate_body': server_cert['CertificateBody'],
- 'server_certificate_id': cert_md['ServerCertificateId'],
- 'server_certificate_name': cert_md['ServerCertificateName'],
- 'arn': cert_md['Arn'],
- 'path': cert_md['Path'],
- 'expiration': cert_md['Expiration'].isoformat(),
- 'upload_date': cert_md['UploadDate'].isoformat(),
+ server_cert = iam.get_server_certificate(ServerCertificateName=server_cert["ServerCertificateName"])[
+ "ServerCertificate"
+ ]
+ cert_md = server_cert["ServerCertificateMetadata"]
+ results[cert_md["ServerCertificateName"]] = {
+ "certificate_body": server_cert["CertificateBody"],
+ "server_certificate_id": cert_md["ServerCertificateId"],
+ "server_certificate_name": cert_md["ServerCertificateName"],
+ "arn": cert_md["Arn"],
+ "path": cert_md["Path"],
+ "expiration": cert_md["Expiration"].isoformat(),
+ "upload_date": cert_md["UploadDate"].isoformat(),
}
except botocore.exceptions.ClientError:
@@ -139,7 +138,7 @@ def get_server_certs(iam, name=None):
def main():
argument_spec = dict(
- name=dict(type='str'),
+ name=dict(type="str"),
)
module = AnsibleAWSModule(
@@ -148,14 +147,14 @@ def main():
)
try:
- iam = module.client('iam')
+ iam = module.client("iam")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- cert_name = module.params.get('name')
+ cert_name = module.params.get("name")
results = get_server_certs(iam, cert_name)
module.exit_json(results=results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
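One caveat in the listing above: iam.list_server_certificates() is called once, so an account holding more than one page of certificates would be silently truncated. A paginated variant, should that ever matter (a sketch):

    def list_all_server_certificates(iam):
        paginator = iam.get_paginator("list_server_certificates")
        return paginator.paginate().build_full_result()["ServerCertificateMetadataList"]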
diff --git a/ansible_collections/community/aws/plugins/modules/inspector_target.py b/ansible_collections/community/aws/plugins/modules/inspector_target.py
index 2ec9e9a0e..f9ec6d53a 100644
--- a/ansible_collections/community/aws/plugins/modules/inspector_target.py
+++ b/ansible_collections/community/aws/plugins/modules/inspector_target.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2018 Dennis Conrad for Sainsbury's
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: inspector_target
version_added: 1.0.0
@@ -39,12 +37,12 @@ options:
- Required if I(state=present).
type: dict
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create my_target Assessment Target
community.aws.inspector_target:
name: my_target
@@ -62,9 +60,9 @@ EXAMPLES = '''
community.aws.inspector_target:
name: my_target
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
arn:
description: The ARN that specifies the Amazon Inspector assessment target.
returned: success
@@ -97,32 +95,32 @@ updated_at:
returned: success
type: str
sample: "2018-01-29T13:48:51.958000+00:00"
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
- ansible_dict_to_boto3_tag_list,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict,
- compare_aws_tags,
-)
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def main():
argument_spec = dict(
name=dict(required=True),
- state=dict(choices=['absent', 'present'], default='present'),
- tags=dict(type='dict'),
+ state=dict(choices=["absent", "present"], default="present"),
+ tags=dict(type="dict"),
)
- required_if = [['state', 'present', ['tags']]]
+ required_if = [["state", "present", ["tags"]]]
module = AnsibleAWSModule(
argument_spec=argument_spec,
@@ -130,29 +128,37 @@ def main():
required_if=required_if,
)
- name = module.params.get('name')
- state = module.params.get('state').lower()
- tags = module.params.get('tags')
+ name = module.params.get("name")
+ state = module.params.get("state").lower()
+ tags = module.params.get("tags")
if tags:
- tags = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
+ tags = ansible_dict_to_boto3_tag_list(tags, "key", "value")
- client = module.client('inspector')
+ client = module.client("inspector")
try:
existing_target_arn = client.list_assessment_targets(
- filter={'assessmentTargetNamePattern': name},
- ).get('assessmentTargetArns')[0]
+ filter={"assessmentTargetNamePattern": name},
+ ).get(
+ "assessmentTargetArns"
+ )[0]
existing_target = camel_dict_to_snake_dict(
client.describe_assessment_targets(
assessmentTargetArns=[existing_target_arn],
- ).get('assessmentTargets')[0]
+ ).get(
+ "assessmentTargets"
+ )[0]
)
- existing_resource_group_arn = existing_target.get('resource_group_arn')
- existing_resource_group_tags = client.describe_resource_groups(
- resourceGroupArns=[existing_resource_group_arn],
- ).get('resourceGroups')[0].get('tags')
+ existing_resource_group_arn = existing_target.get("resource_group_arn")
+ existing_resource_group_tags = (
+ client.describe_resource_groups(
+ resourceGroupArns=[existing_resource_group_arn],
+ )
+ .get("resourceGroups")[0]
+ .get("tags")
+ )
target_exists = True
except (
@@ -163,23 +169,18 @@ def main():
except IndexError:
target_exists = False
- if state == 'present' and target_exists:
+ if state == "present" and target_exists:
ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags)
- ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(
- existing_resource_group_tags
- )
- tags_to_add, tags_to_remove = compare_aws_tags(
- ansible_dict_tags,
- ansible_dict_existing_tags
- )
+ ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(existing_resource_group_tags)
+ tags_to_add, tags_to_remove = compare_aws_tags(ansible_dict_tags, ansible_dict_existing_tags)
if not (tags_to_add or tags_to_remove):
- existing_target.update({'tags': ansible_dict_existing_tags})
+ existing_target.update({"tags": ansible_dict_existing_tags})
module.exit_json(changed=False, **existing_target)
else:
try:
updated_resource_group_arn = client.create_resource_group(
resourceGroupTags=tags,
- ).get('resourceGroupArn')
+ ).get("resourceGroupArn")
client.update_assessment_target(
assessmentTargetArn=existing_target_arn,
@@ -190,10 +191,12 @@ def main():
updated_target = camel_dict_to_snake_dict(
client.describe_assessment_targets(
assessmentTargetArns=[existing_target_arn],
- ).get('assessmentTargets')[0]
+ ).get(
+ "assessmentTargets"
+ )[0]
)
- updated_target.update({'tags': ansible_dict_tags})
+ updated_target.update({"tags": ansible_dict_tags})
module.exit_json(changed=True, **updated_target)
except (
botocore.exceptions.BotoCoreError,
@@ -201,24 +204,26 @@ def main():
) as e:
module.fail_json_aws(e, msg="trying to update target")
- elif state == 'present' and not target_exists:
+ elif state == "present" and not target_exists:
try:
new_resource_group_arn = client.create_resource_group(
resourceGroupTags=tags,
- ).get('resourceGroupArn')
+ ).get("resourceGroupArn")
new_target_arn = client.create_assessment_target(
assessmentTargetName=name,
resourceGroupArn=new_resource_group_arn,
- ).get('assessmentTargetArn')
+ ).get("assessmentTargetArn")
new_target = camel_dict_to_snake_dict(
client.describe_assessment_targets(
assessmentTargetArns=[new_target_arn],
- ).get('assessmentTargets')[0]
+ ).get(
+ "assessmentTargets"
+ )[0]
)
- new_target.update({'tags': boto3_tag_list_to_ansible_dict(tags)})
+ new_target.update({"tags": boto3_tag_list_to_ansible_dict(tags)})
module.exit_json(changed=True, **new_target)
except (
botocore.exceptions.BotoCoreError,
@@ -226,7 +231,7 @@ def main():
) as e:
module.fail_json_aws(e, msg="trying to create target")
- elif state == 'absent' and target_exists:
+ elif state == "absent" and target_exists:
try:
client.delete_assessment_target(
assessmentTargetArn=existing_target_arn,
@@ -238,9 +243,9 @@ def main():
) as e:
module.fail_json_aws(e, msg="trying to delete target")
- elif state == 'absent' and not target_exists:
+ elif state == "absent" and not target_exists:
module.exit_json(changed=False)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
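A detail worth calling out in the tag handling above: the Inspector API expects tag dictionaries keyed 'key'/'value' in lowercase, unlike the 'Key'/'Value' shape most AWS services use, hence the extra arguments passed to ansible_dict_to_boto3_tag_list. A standalone converter showing both shapes (hypothetical helper):

    def dict_to_tag_list(tags, key_name="Key", value_name="Value"):
        """{'Env': 'prod'} -> [{'Key': 'Env', 'Value': 'prod'}]."""
        return [{key_name: k, value_name: v} for k, v in tags.items()]

    # dict_to_tag_list({"Env": "prod"}, "key", "value")
    # -> [{"key": "Env", "value": "prod"}]  (the lowercase shape Inspector wants)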
diff --git a/ansible_collections/community/aws/plugins/modules/kinesis_stream.py b/ansible_collections/community/aws/plugins/modules/kinesis_stream.py
index e4c5d76df..d1ba65c86 100644
--- a/ansible_collections/community/aws/plugins/modules/kinesis_stream.py
+++ b/ansible_collections/community/aws/plugins/modules/kinesis_stream.py
@@ -1,22 +1,21 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: kinesis_stream
version_added: 1.0.0
short_description: Manage a Kinesis Stream.
description:
- - Create or Delete a Kinesis Stream.
- - Update the retention period of a Kinesis Stream.
- - Update Tags on a Kinesis Stream.
- - Enable/disable server side encryption on a Kinesis Stream.
-author: Allen Sanabria (@linuxdynasty)
+ - Create or Delete a Kinesis Stream.
+ - Update the retention period of a Kinesis Stream.
+ - Update Tags on a Kinesis Stream.
+ - Enable/disable server side encryption on a Kinesis Stream.
+author:
+ - Allen Sanabria (@linuxdynasty)
options:
name:
description:
@@ -73,13 +72,12 @@ options:
- The GUID or alias for the KMS key.
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic creation example:
@@ -148,9 +146,9 @@ EXAMPLES = '''
wait: true
wait_timeout: 600
register: test_stream
-'''
+"""
-RETURN = '''
+RETURN = r"""
stream_name:
description: The name of the Kinesis Stream.
returned: when state == present.
@@ -179,7 +177,7 @@ tags:
"Name": "Splunk",
"Env": "development"
}
-'''
+"""
import time
@@ -191,9 +189,10 @@ except ImportError:
from ansible.module_utils._text import to_native
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_tags(client, stream_name):
@@ -210,16 +209,14 @@ def get_tags(client, stream_name):
Returns:
Tuple (bool, str, dict)
"""
- err_msg = ''
+ err_msg = ""
success = False
params = {
- 'StreamName': stream_name,
+ "StreamName": stream_name,
}
results = dict()
try:
- results = (
- client.list_tags_for_stream(**params)['Tags']
- )
+ results = client.list_tags_for_stream(**params)["Tags"]
success = True
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
@@ -240,28 +237,26 @@ def find_stream(client, stream_name):
Returns:
Tuple (bool, str, dict)
"""
- err_msg = ''
+ err_msg = ""
success = False
params = {
- 'StreamName': stream_name,
+ "StreamName": stream_name,
}
results = dict()
has_more_shards = True
shards = list()
try:
while has_more_shards:
- results = (
- client.describe_stream(**params)['StreamDescription']
- )
- shards.extend(results.pop('Shards'))
- has_more_shards = results['HasMoreShards']
+ results = client.describe_stream(**params)["StreamDescription"]
+ shards.extend(results.pop("Shards"))
+ has_more_shards = results["HasMoreShards"]
if has_more_shards:
- params['ExclusiveStartShardId'] = shards[-1]['ShardId']
- results['Shards'] = shards
- num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']])
- results['OpenShardsCount'] = len(shards) - num_closed_shards
- results['ClosedShardsCount'] = num_closed_shards
- results['ShardsCount'] = len(shards)
+ params["ExclusiveStartShardId"] = shards[-1]["ShardId"]
+ results["Shards"] = shards
+ num_closed_shards = len([s for s in shards if "EndingSequenceNumber" in s["SequenceNumberRange"]])
+ results["OpenShardsCount"] = len(shards) - num_closed_shards
+ results["ClosedShardsCount"] = num_closed_shards
+ results["ShardsCount"] = len(shards)
success = True
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
@@ -269,8 +264,7 @@ def find_stream(client, stream_name):
return success, err_msg, results
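find_stream pages DescribeStream via ExclusiveStartShardId and then classifies shards: a shard closed by a reshard carries an EndingSequenceNumber in its SequenceNumberRange, while open shards do not. The counting logic in isolation:

    def count_shards(shards):
        closed = sum(1 for s in shards if "EndingSequenceNumber" in s["SequenceNumberRange"])
        return {
            "ShardsCount": len(shards),
            "OpenShardsCount": len(shards) - closed,
            "ClosedShardsCount": closed,
        }

    # count_shards([
    #     {"SequenceNumberRange": {"StartingSequenceNumber": "1"}},
    #     {"SequenceNumberRange": {"StartingSequenceNumber": "1", "EndingSequenceNumber": "2"}},
    # ]) -> {'ShardsCount': 2, 'OpenShardsCount': 1, 'ClosedShardsCount': 1}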
-def wait_for_status(client, stream_name, status, wait_timeout=300,
- check_mode=False):
+def wait_for_status(client, stream_name, status, wait_timeout=300, check_mode=False):
"""Wait for the status to change for a Kinesis Stream.
Args:
         client (botocore.client.Kinesis): Boto3 client
@@ -299,16 +293,14 @@ def wait_for_status(client, stream_name, status, wait_timeout=300,
while wait_timeout > time.time():
try:
- find_success, find_msg, stream = (
- find_stream(client, stream_name)
- )
+ find_success, find_msg, stream = find_stream(client, stream_name)
if check_mode:
status_achieved = True
break
- elif status != 'DELETING':
+ elif status != "DELETING":
if find_success and stream:
- if stream.get('StreamStatus') == status:
+ if stream.get("StreamStatus") == status:
status_achieved = True
break
@@ -325,12 +317,12 @@ def wait_for_status(client, stream_name, status, wait_timeout=300,
if not status_achieved:
err_msg = "Wait time out reached, while waiting for results"
else:
- err_msg = "Status {0} achieved successfully".format(status)
+ err_msg = f"Status {status} achieved successfully"
return status_achieved, err_msg, stream
-def tags_action(client, stream_name, tags, action='create', check_mode=False):
+def tags_action(client, stream_name, tags, action="create", check_mode=False):
"""Create or delete multiple tags from a Kinesis Stream.
Args:
         client (botocore.client.Kinesis): Boto3 client.
@@ -357,26 +349,26 @@ def tags_action(client, stream_name, tags, action='create', check_mode=False):
"""
success = False
err_msg = ""
- params = {'StreamName': stream_name}
+ params = {"StreamName": stream_name}
try:
if not check_mode:
- if action == 'create':
- params['Tags'] = tags
+ if action == "create":
+ params["Tags"] = tags
client.add_tags_to_stream(**params)
success = True
- elif action == 'delete':
- params['TagKeys'] = tags
+ elif action == "delete":
+ params["TagKeys"] = tags
client.remove_tags_from_stream(**params)
success = True
else:
- err_msg = 'Invalid action {0}'.format(action)
+ err_msg = f"Invalid action {action}"
else:
- if action == 'create':
+ if action == "create":
success = True
- elif action == 'delete':
+ elif action == "delete":
success = True
else:
- err_msg = 'Invalid action {0}'.format(action)
+ err_msg = f"Invalid action {action}"
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
@@ -406,32 +398,25 @@ def update_tags(client, stream_name, tags, check_mode=False):
"""
success = False
changed = False
- err_msg = ''
- tag_success, tag_msg, current_tags = (
- get_tags(client, stream_name)
- )
+ err_msg = ""
+ tag_success, tag_msg, current_tags = get_tags(client, stream_name)
tags_to_set, tags_to_delete = compare_aws_tags(
- current_tags, tags,
+ current_tags,
+ tags,
purge_tags=True,
)
if tags_to_delete:
- delete_success, delete_msg = (
- tags_action(
- client, stream_name, tags_to_delete, action='delete',
- check_mode=check_mode
- )
+ delete_success, delete_msg = tags_action(
+ client, stream_name, tags_to_delete, action="delete", check_mode=check_mode
)
if not delete_success:
return delete_success, changed, delete_msg
- tag_msg = 'Tags removed'
+ tag_msg = "Tags removed"
if tags_to_set:
- create_success, create_msg = (
- tags_action(
- client, stream_name, tags_to_set, action='create',
- check_mode=check_mode
- )
+ create_success, create_msg = tags_action(
+ client, stream_name, tags_to_set, action="create", check_mode=check_mode
)
if create_success:
changed = True
@@ -440,8 +425,7 @@ def update_tags(client, stream_name, tags, check_mode=False):
return success, changed, err_msg
-def stream_action(client, stream_name, shard_count=1, action='create',
- timeout=300, check_mode=False):
+def stream_action(client, stream_name, shard_count=1, action="create", timeout=300, check_mode=False):
"""Create or Delete an Amazon Kinesis Stream.
Args:
         client (botocore.client.Kinesis): Boto3 client.
@@ -465,28 +449,26 @@ def stream_action(client, stream_name, shard_count=1, action='create',
List (bool, str)
"""
success = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
+ err_msg = ""
+ params = {"StreamName": stream_name}
try:
if not check_mode:
- if action == 'create':
- params['ShardCount'] = shard_count
+ if action == "create":
+ params["ShardCount"] = shard_count
client.create_stream(**params)
success = True
- elif action == 'delete':
+ elif action == "delete":
client.delete_stream(**params)
success = True
else:
- err_msg = 'Invalid action {0}'.format(action)
+ err_msg = f"Invalid action {action}"
else:
- if action == 'create':
+ if action == "create":
success = True
- elif action == 'delete':
+ elif action == "delete":
success = True
else:
- err_msg = 'Invalid action {0}'.format(action)
+ err_msg = f"Invalid action {action}"
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
@@ -494,8 +476,9 @@ def stream_action(client, stream_name, shard_count=1, action='create',
return success, err_msg
-def stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='', key_id='',
- timeout=300, check_mode=False):
+def stream_encryption_action(
+ client, stream_name, action="start_encryption", encryption_type="", key_id="", timeout=300, check_mode=False
+):
"""Create, Encrypt or Delete an Amazon Kinesis Stream.
Args:
         client (botocore.client.Kinesis): Boto3 client.
@@ -521,31 +504,29 @@ def stream_encryption_action(client, stream_name, action='start_encryption', enc
List (bool, str)
"""
success = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
+ err_msg = ""
+ params = {"StreamName": stream_name}
try:
if not check_mode:
- if action == 'start_encryption':
- params['EncryptionType'] = encryption_type
- params['KeyId'] = key_id
+ if action == "start_encryption":
+ params["EncryptionType"] = encryption_type
+ params["KeyId"] = key_id
client.start_stream_encryption(**params)
success = True
- elif action == 'stop_encryption':
- params['EncryptionType'] = encryption_type
- params['KeyId'] = key_id
+ elif action == "stop_encryption":
+ params["EncryptionType"] = encryption_type
+ params["KeyId"] = key_id
client.stop_stream_encryption(**params)
success = True
else:
- err_msg = 'Invalid encryption action {0}'.format(action)
+ err_msg = f"Invalid encryption action {action}"
else:
- if action == 'start_encryption':
+ if action == "start_encryption":
success = True
- elif action == 'stop_encryption':
+ elif action == "stop_encryption":
success = True
else:
- err_msg = 'Invalid encryption action {0}'.format(action)
+ err_msg = f"Invalid encryption action {action}"
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
@@ -553,8 +534,7 @@ def stream_encryption_action(client, stream_name, action='start_encryption', enc
return success, err_msg
-def retention_action(client, stream_name, retention_period=24,
- action='increase', check_mode=False):
+def retention_action(client, stream_name, retention_period=24, action="increase", check_mode=False):
"""Increase or Decrease the retention of messages in the Kinesis stream.
Args:
         client (botocore.client.Kinesis): Boto3 client.
@@ -579,35 +559,29 @@ def retention_action(client, stream_name, retention_period=24,
Tuple (bool, str)
"""
success = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
+ err_msg = ""
+ params = {"StreamName": stream_name}
try:
if not check_mode:
- if action == 'increase':
- params['RetentionPeriodHours'] = retention_period
+ if action == "increase":
+ params["RetentionPeriodHours"] = retention_period
client.increase_stream_retention_period(**params)
success = True
- err_msg = (
- 'Retention Period increased successfully to {0}'.format(retention_period)
- )
- elif action == 'decrease':
- params['RetentionPeriodHours'] = retention_period
+ err_msg = f"Retention Period increased successfully to {retention_period}"
+ elif action == "decrease":
+ params["RetentionPeriodHours"] = retention_period
client.decrease_stream_retention_period(**params)
success = True
- err_msg = (
- 'Retention Period decreased successfully to {0}'.format(retention_period)
- )
+ err_msg = f"Retention Period decreased successfully to {retention_period}"
else:
- err_msg = 'Invalid action {0}'.format(action)
+ err_msg = f"Invalid action {action}"
else:
- if action == 'increase':
+ if action == "increase":
success = True
- elif action == 'decrease':
+ elif action == "decrease":
success = True
else:
- err_msg = 'Invalid action {0}'.format(action)
+ err_msg = f"Invalid action {action}"
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
@@ -637,13 +611,10 @@ def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False
Tuple (bool, str)
"""
success = True
- err_msg = ''
- params = {
- 'StreamName': stream_name,
- 'ScalingType': 'UNIFORM_SCALING'
- }
+ err_msg = ""
+ params = {"StreamName": stream_name, "ScalingType": "UNIFORM_SCALING"}
if not check_mode:
- params['TargetShardCount'] = number_of_shards
+ params["TargetShardCount"] = number_of_shards
try:
client.update_shard_count(**params)
except botocore.exceptions.ClientError as e:
@@ -652,8 +623,17 @@ def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False
return success, err_msg
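+# Sketch of the call issued above: UNIFORM_SCALING splits or merges shards so
+# records are spread evenly across the target count, e.g.:
+#
+#   client.update_shard_count(StreamName="my-stream", TargetShardCount=4,
+#                             ScalingType="UNIFORM_SCALING")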
-def update(client, current_stream, stream_name, number_of_shards=1, retention_period=None,
- tags=None, wait=False, wait_timeout=300, check_mode=False):
+def update(
+ client,
+ current_stream,
+ stream_name,
+ number_of_shards=1,
+ retention_period=None,
+ tags=None,
+ wait=False,
+ wait_timeout=300,
+ check_mode=False,
+):
"""Update an Amazon Kinesis Stream.
Args:
client (botocore.client.Kinesis): Boto3 client.
@@ -693,44 +673,29 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe
"""
success = True
changed = False
- err_msg = ''
+ err_msg = ""
if retention_period:
if wait:
- wait_success, wait_msg, current_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
+ wait_success, wait_msg, current_stream = wait_for_status(
+ client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode
)
if not wait_success:
return wait_success, False, wait_msg
- if current_stream.get('StreamStatus') == 'ACTIVE':
+ if current_stream.get("StreamStatus") == "ACTIVE":
retention_changed = False
- if retention_period > current_stream['RetentionPeriodHours']:
- retention_changed, retention_msg = (
- retention_action(
- client, stream_name, retention_period, action='increase',
- check_mode=check_mode
- )
+ if retention_period > current_stream["RetentionPeriodHours"]:
+ retention_changed, retention_msg = retention_action(
+ client, stream_name, retention_period, action="increase", check_mode=check_mode
)
- elif retention_period < current_stream['RetentionPeriodHours']:
- retention_changed, retention_msg = (
- retention_action(
- client, stream_name, retention_period, action='decrease',
- check_mode=check_mode
- )
+ elif retention_period < current_stream["RetentionPeriodHours"]:
+ retention_changed, retention_msg = retention_action(
+ client, stream_name, retention_period, action="decrease", check_mode=check_mode
)
- elif retention_period == current_stream['RetentionPeriodHours']:
- retention_msg = (
- 'Retention {0} is the same as {1}'
- .format(
- retention_period,
- current_stream['RetentionPeriodHours']
- )
- )
+ elif retention_period == current_stream["RetentionPeriodHours"]:
+ retention_msg = f"Retention {retention_period} is the same as {current_stream['RetentionPeriodHours']}"
success = True
if retention_changed:
@@ -739,36 +704,26 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe
err_msg = retention_msg
if changed and wait:
- wait_success, wait_msg, current_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
+ wait_success, wait_msg, current_stream = wait_for_status(
+ client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode
)
if not wait_success:
return wait_success, False, wait_msg
elif changed and not wait:
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name)
- )
+ stream_found, stream_msg, current_stream = find_stream(client, stream_name)
if stream_found:
- if current_stream['StreamStatus'] != 'ACTIVE':
- err_msg = (
- 'Retention Period for {0} is in the process of updating'
- .format(stream_name)
- )
+ if current_stream["StreamStatus"] != "ACTIVE":
+ err_msg = f"Retention Period for {stream_name} is in the process of updating"
return success, changed, err_msg
else:
err_msg = (
- 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
- .format(current_stream.get('StreamStatus', 'UNKNOWN'))
+ "StreamStatus has to be ACTIVE in order to modify the retention period."
+ f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}"
)
return success, changed, err_msg
- if current_stream['OpenShardsCount'] != number_of_shards:
- success, err_msg = (
- update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode)
- )
+ if current_stream["OpenShardsCount"] != number_of_shards:
+ success, err_msg = update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode)
if not success:
return success, changed, err_msg
@@ -776,47 +731,42 @@ def update(client, current_stream, stream_name, number_of_shards=1, retention_pe
changed = True
if wait:
- wait_success, wait_msg, current_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
+ wait_success, wait_msg, current_stream = wait_for_status(
+ client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode
)
if not wait_success:
return wait_success, changed, wait_msg
else:
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name)
- )
- if stream_found and current_stream['StreamStatus'] != 'ACTIVE':
- err_msg = (
- 'Number of shards for {0} is in the process of updating'
- .format(stream_name)
- )
+ stream_found, stream_msg, current_stream = find_stream(client, stream_name)
+ if stream_found and current_stream["StreamStatus"] != "ACTIVE":
+ err_msg = f"Number of shards for {stream_name} is in the process of updating"
return success, changed, err_msg
if tags:
- tag_success, tag_changed, err_msg = (
- update_tags(client, stream_name, tags, check_mode=check_mode)
- )
+ tag_success, tag_changed, err_msg = update_tags(client, stream_name, tags, check_mode=check_mode)
changed |= tag_changed
if wait:
- success, err_msg, status_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
+ success, err_msg, status_stream = wait_for_status(
+ client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode
)
if success and changed:
- err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name)
+ err_msg = f"Kinesis Stream {stream_name} updated successfully."
elif success and not changed:
- err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name)
+ err_msg = f"Kinesis Stream {stream_name} did not change."
return success, changed, err_msg
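+# The retention branch above maps onto the two one-way Kinesis APIs; a sketch
+# with hypothetical values:
+#
+#   client.increase_stream_retention_period(StreamName="s", RetentionPeriodHours=48)
+#   client.decrease_stream_retention_period(StreamName="s", RetentionPeriodHours=24)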
-def create_stream(client, stream_name, number_of_shards=1, retention_period=None,
- tags=None, wait=False, wait_timeout=300, check_mode=False):
+def create_stream(
+ client,
+ stream_name,
+ number_of_shards=1,
+ retention_period=None,
+ tags=None,
+ wait=False,
+ wait_timeout=300,
+ check_mode=False,
+):
"""Create an Amazon Kinesis Stream.
Args:
client (botocore.client.Kinesis): Boto3 client.
@@ -848,79 +798,59 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None
"""
success = False
changed = False
- err_msg = ''
+ err_msg = ""
results = dict()
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name)
- )
+ stream_found, stream_msg, current_stream = find_stream(client, stream_name)
- if stream_found and current_stream.get('StreamStatus') == 'DELETING' and wait:
- wait_success, wait_msg, current_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
+ if stream_found and current_stream.get("StreamStatus") == "DELETING" and wait:
+ wait_success, wait_msg, current_stream = wait_for_status(
+ client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode
)
- if stream_found and current_stream.get('StreamStatus') != 'DELETING':
+ if stream_found and current_stream.get("StreamStatus") != "DELETING":
success, changed, err_msg = update(
- client, current_stream, stream_name, number_of_shards,
- retention_period, tags, wait, wait_timeout, check_mode=check_mode
+ client,
+ current_stream,
+ stream_name,
+ number_of_shards,
+ retention_period,
+ tags,
+ wait,
+ wait_timeout,
+ check_mode=check_mode,
)
else:
- create_success, create_msg = (
- stream_action(
- client, stream_name, number_of_shards, action='create',
- check_mode=check_mode
- )
+ create_success, create_msg = stream_action(
+ client, stream_name, number_of_shards, action="create", check_mode=check_mode
)
if not create_success:
changed = True
- err_msg = 'Failed to create Kinesis stream: {0}'.format(create_msg)
+ err_msg = f"Failed to create Kinesis stream: {create_msg}"
return False, True, err_msg, {}
else:
changed = True
if wait:
- wait_success, wait_msg, results = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
- )
- err_msg = (
- 'Kinesis Stream {0} is in the process of being created'
- .format(stream_name)
+ wait_success, wait_msg, results = wait_for_status(
+ client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode
)
+ err_msg = f"Kinesis Stream {stream_name} is in the process of being created"
if not wait_success:
return wait_success, True, wait_msg, results
else:
- err_msg = (
- 'Kinesis Stream {0} created successfully'
- .format(stream_name)
- )
+ err_msg = f"Kinesis Stream {stream_name} created successfully"
if tags:
- changed, err_msg = (
- tags_action(
- client, stream_name, tags, action='create',
- check_mode=check_mode
- )
- )
+ changed, err_msg = tags_action(client, stream_name, tags, action="create", check_mode=check_mode)
if changed:
success = True
if not success:
return success, changed, err_msg, results
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name)
- )
- if retention_period and current_stream.get('StreamStatus') == 'ACTIVE':
- changed, err_msg = (
- retention_action(
- client, stream_name, retention_period, action='increase',
- check_mode=check_mode
- )
+ stream_found, stream_msg, current_stream = find_stream(client, stream_name)
+ if retention_period and current_stream.get("StreamStatus") == "ACTIVE":
+ changed, err_msg = retention_action(
+ client, stream_name, retention_period, action="increase", check_mode=check_mode
)
if changed:
success = True
@@ -928,19 +858,15 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None
return success, changed, err_msg, results
else:
err_msg = (
- 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
- .format(current_stream.get('StreamStatus', 'UNKNOWN'))
+ "StreamStatus has to be ACTIVE in order to modify the retention period."
+ f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}"
)
success = create_success
changed = True
if success:
- stream_found, stream_msg, results = (
- find_stream(client, stream_name)
- )
- tag_success, tag_msg, current_tags = (
- get_tags(client, stream_name)
- )
+ stream_found, stream_msg, results = find_stream(client, stream_name)
+ tag_success, tag_msg, current_tags = get_tags(client, stream_name)
if check_mode:
current_tags = tags
@@ -948,13 +874,12 @@ def create_stream(client, stream_name, number_of_shards=1, retention_period=None
current_tags = dict()
results = camel_dict_to_snake_dict(results)
- results['tags'] = current_tags
+ results["tags"] = current_tags
return success, changed, err_msg, results
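+# create_stream() is effectively "ensure present": an existing stream that is
+# not DELETING is routed to update(); otherwise the stream is created first and
+# retention/tags are applied once it reports ACTIVE.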
-def delete_stream(client, stream_name, wait=False, wait_timeout=300,
- check_mode=False):
+def delete_stream(client, stream_name, wait=False, wait_timeout=300, check_mode=False):
"""Delete an Amazon Kinesis Stream.
Args:
client (botocore.client.Kinesis): Boto3 client.
@@ -978,44 +903,33 @@ def delete_stream(client, stream_name, wait=False, wait_timeout=300,
"""
success = False
changed = False
- err_msg = ''
+ err_msg = ""
results = dict()
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name)
- )
+ stream_found, stream_msg, current_stream = find_stream(client, stream_name)
if stream_found:
- success, err_msg = (
- stream_action(
- client, stream_name, action='delete', check_mode=check_mode
- )
- )
+ success, err_msg = stream_action(client, stream_name, action="delete", check_mode=check_mode)
if success:
changed = True
if wait:
- success, err_msg, results = (
- wait_for_status(
- client, stream_name, 'DELETING', wait_timeout,
- check_mode=check_mode
- )
+ success, err_msg, results = wait_for_status(
+ client, stream_name, "DELETING", wait_timeout, check_mode=check_mode
)
- err_msg = 'Stream {0} deleted successfully'.format(stream_name)
+ err_msg = f"Stream {stream_name} deleted successfully"
if not success:
return success, True, err_msg, results
else:
- err_msg = (
- 'Stream {0} is in the process of being deleted'
- .format(stream_name)
- )
+ err_msg = f"Stream {stream_name} is in the process of being deleted"
else:
success = True
changed = False
- err_msg = 'Stream {0} does not exist'.format(stream_name)
+ err_msg = f"Stream {stream_name} does not exist"
return success, changed, err_msg, results
-def start_stream_encryption(client, stream_name, encryption_type='', key_id='',
- wait=False, wait_timeout=300, check_mode=False):
+def start_stream_encryption(
+ client, stream_name, encryption_type="", key_id="", wait=False, wait_timeout=300, check_mode=False
+):
"""Start encryption on an Amazon Kinesis Stream.
Args:
client (botocore.client.Kinesis): Boto3 client.
@@ -1043,65 +957,56 @@ def start_stream_encryption(client, stream_name, encryption_type='', key_id='',
"""
success = False
changed = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
+ err_msg = ""
+ params = {"StreamName": stream_name}
results = dict()
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name)
- )
+ stream_found, stream_msg, current_stream = find_stream(client, stream_name)
if stream_found:
- if (current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id):
+ if current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id:
changed = False
success = True
- err_msg = 'Kinesis Stream {0} encryption already configured.'.format(stream_name)
+ err_msg = f"Kinesis Stream {stream_name} encryption already configured."
else:
- success, err_msg = (
- stream_encryption_action(
- client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode
- )
+ success, err_msg = stream_encryption_action(
+ client,
+ stream_name,
+ action="start_encryption",
+ encryption_type=encryption_type,
+ key_id=key_id,
+ check_mode=check_mode,
)
if success:
changed = True
if wait:
- success, err_msg, results = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
+ success, err_msg, results = wait_for_status(
+ client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode
)
- err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name)
+ err_msg = f"Kinesis Stream {stream_name} encryption started successfully."
if not success:
return success, True, err_msg, results
else:
- err_msg = (
- 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name)
- )
+ err_msg = f"Kinesis Stream {stream_name} is in the process of starting encryption."
else:
success = True
changed = False
- err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name)
+ err_msg = f"Kinesis Stream {stream_name} does not exist"
if success:
- stream_found, stream_msg, results = (
- find_stream(client, stream_name)
- )
- tag_success, tag_msg, current_tags = (
- get_tags(client, stream_name)
- )
+ stream_found, stream_msg, results = find_stream(client, stream_name)
+ tag_success, tag_msg, current_tags = get_tags(client, stream_name)
if not current_tags:
current_tags = dict()
results = camel_dict_to_snake_dict(results)
- results['tags'] = current_tags
+ results["tags"] = current_tags
return success, changed, err_msg, results
-def stop_stream_encryption(client, stream_name, encryption_type='', key_id='',
- wait=True, wait_timeout=300, check_mode=False):
+def stop_stream_encryption(
+ client, stream_name, encryption_type="", key_id="", wait=True, wait_timeout=300, check_mode=False
+):
"""Stop encryption on an Amazon Kinesis Stream.
Args:
client (botocore.client.Kinesis): Boto3 client.
@@ -1127,57 +1032,47 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='',
"""
success = False
changed = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
+ err_msg = ""
+ params = {"StreamName": stream_name}
results = dict()
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name)
- )
+ stream_found, stream_msg, current_stream = find_stream(client, stream_name)
if stream_found:
- if current_stream.get('EncryptionType') == 'KMS':
- success, err_msg = (
- stream_encryption_action(
- client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode
- )
+ if current_stream.get("EncryptionType") == "KMS":
+ success, err_msg = stream_encryption_action(
+ client,
+ stream_name,
+ action="stop_encryption",
+ key_id=key_id,
+ encryption_type=encryption_type,
+ check_mode=check_mode,
)
changed = success
if wait:
- success, err_msg, results = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
+ success, err_msg, results = wait_for_status(
+ client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode
)
if not success:
return success, True, err_msg, results
- err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name)
+ err_msg = f"Kinesis Stream {stream_name} encryption stopped successfully."
else:
- err_msg = (
- 'Stream {0} is in the process of stopping encryption.'.format(stream_name)
- )
- elif current_stream.get('EncryptionType') == 'NONE':
+ err_msg = f"Stream {stream_name} is in the process of stopping encryption."
+ elif current_stream.get("EncryptionType") == "NONE":
success = True
- err_msg = 'Kinesis Stream {0} encryption already stopped.'.format(stream_name)
+ err_msg = f"Kinesis Stream {stream_name} encryption already stopped."
else:
success = True
changed = False
- err_msg = 'Stream {0} does not exist.'.format(stream_name)
+ err_msg = f"Stream {stream_name} does not exist."
if success:
- stream_found, stream_msg, results = (
- find_stream(client, stream_name)
- )
- tag_success, tag_msg, current_tags = (
- get_tags(client, stream_name)
- )
+ stream_found, stream_msg, results = find_stream(client, stream_name)
+ tag_success, tag_msg, current_tags = get_tags(client, stream_name)
if not current_tags:
current_tags = dict()
results = camel_dict_to_snake_dict(results)
- results['tags'] = current_tags
+ results["tags"] = current_tags
return success, changed, err_msg, results
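+# Both encryption helpers wrap symmetric boto3 calls; a sketch (the key id is
+# illustrative only):
+#
+#   client.start_stream_encryption(StreamName="s", EncryptionType="KMS",
+#                                  KeyId="alias/aws/kinesis")
+#   client.stop_stream_encryption(StreamName="s", EncryptionType="KMS",
+#                                 KeyId="alias/aws/kinesis")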
@@ -1185,78 +1080,65 @@ def stop_stream_encryption(client, stream_name, encryption_type='', key_id='',
def main():
argument_spec = dict(
name=dict(required=True),
- shards=dict(default=None, required=False, type='int'),
- retention_period=dict(default=None, required=False, type='int'),
- tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
- wait=dict(default=True, required=False, type='bool'),
- wait_timeout=dict(default=300, required=False, type='int'),
- state=dict(default='present', choices=['present', 'absent']),
- encryption_type=dict(required=False, choices=['NONE', 'KMS']),
- key_id=dict(required=False, type='str'),
- encryption_state=dict(required=False, choices=['enabled', 'disabled']),
+ shards=dict(default=None, required=False, type="int"),
+ retention_period=dict(default=None, required=False, type="int"),
+ tags=dict(default=None, required=False, type="dict", aliases=["resource_tags"]),
+ wait=dict(default=True, required=False, type="bool"),
+ wait_timeout=dict(default=300, required=False, type="int"),
+ state=dict(default="present", choices=["present", "absent"]),
+ encryption_type=dict(required=False, choices=["NONE", "KMS"]),
+ key_id=dict(required=False, type="str"),
+ encryption_state=dict(required=False, choices=["enabled", "disabled"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
- retention_period = module.params.get('retention_period')
- stream_name = module.params.get('name')
- shards = module.params.get('shards')
- state = module.params.get('state')
- tags = module.params.get('tags')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- encryption_type = module.params.get('encryption_type')
- key_id = module.params.get('key_id')
- encryption_state = module.params.get('encryption_state')
+ retention_period = module.params.get("retention_period")
+ stream_name = module.params.get("name")
+ shards = module.params.get("shards")
+ state = module.params.get("state")
+ tags = module.params.get("tags")
+ wait = module.params.get("wait")
+ wait_timeout = module.params.get("wait_timeout")
+ encryption_type = module.params.get("encryption_type")
+ key_id = module.params.get("key_id")
+ encryption_state = module.params.get("encryption_state")
- if state == 'present' and not shards:
- module.fail_json(msg='Shards is required when state == present.')
+ if state == "present" and not shards:
+ module.fail_json(msg="Shards is required when state == present.")
if retention_period:
if retention_period < 24:
- module.fail_json(msg='Retention period can not be less than 24 hours.')
+ module.fail_json(msg="Retention period can not be less than 24 hours.")
check_mode = module.check_mode
try:
- client = module.client('kinesis')
+ client = module.client("kinesis")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- if state == 'present':
- success, changed, err_msg, results = (
- create_stream(
- client, stream_name, shards, retention_period, tags,
- wait, wait_timeout, check_mode
- )
+ if state == "present":
+ success, changed, err_msg, results = create_stream(
+ client, stream_name, shards, retention_period, tags, wait, wait_timeout, check_mode
)
- if encryption_state == 'enabled':
- success, changed, err_msg, results = (
- start_stream_encryption(
- client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
- )
+ if encryption_state == "enabled":
+ success, changed, err_msg, results = start_stream_encryption(
+ client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
)
- elif encryption_state == 'disabled':
- success, changed, err_msg, results = (
- stop_stream_encryption(
- client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
- )
+ elif encryption_state == "disabled":
+ success, changed, err_msg, results = stop_stream_encryption(
+ client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
)
- elif state == 'absent':
- success, changed, err_msg, results = (
- delete_stream(client, stream_name, wait, wait_timeout, check_mode)
- )
+ elif state == "absent":
+ success, changed, err_msg, results = delete_stream(client, stream_name, wait, wait_timeout, check_mode)
if success:
- module.exit_json(
- success=success, changed=changed, msg=err_msg, **results
- )
+ module.exit_json(success=success, changed=changed, msg=err_msg, **results)
else:
- module.fail_json(
- success=success, changed=changed, msg=err_msg, result=results
- )
+ module.fail_json(success=success, changed=changed, msg=err_msg, result=results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/lightsail.py b/ansible_collections/community/aws/plugins/modules/lightsail.py
index 5e4035154..16b4338e7 100644
--- a/ansible_collections/community/aws/plugins/modules/lightsail.py
+++ b/ansible_collections/community/aws/plugins/modules/lightsail.py
@@ -1,23 +1,20 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: lightsail
version_added: 1.0.0
short_description: Manage instances in AWS Lightsail
description:
- - Manage instances in AWS Lightsail.
- - Instance tagging is not yet supported in this module.
+ - Manage instances in AWS Lightsail.
+ - Instance tagging is not yet supported in this module.
author:
- - "Nick Ball (@nickball)"
- - "Prasad Katti (@prasadkatti)"
+ - "Nick Ball (@nickball)"
+ - "Prasad Katti (@prasadkatti)"
options:
state:
description:
@@ -50,6 +47,38 @@ options:
- Launch script that can configure the instance with additional data.
type: str
default: ''
+ public_ports:
+ description:
+ - A list of dictionaries to describe the ports to open for the specified instance.
+ type: list
+ elements: dict
+ suboptions:
+ from_port:
+ description: The first port in a range of open ports on the instance.
+ type: int
+ required: true
+ to_port:
+ description: The last port in a range of open ports on the instance.
+ type: int
+ required: true
+ protocol:
+ description: The IP protocol name accepted for the defined range of open ports.
+ type: str
+ choices: ['tcp', 'all', 'udp', 'icmp']
+ required: true
+ cidrs:
+ description:
+ - The IPv4 address, or range of IPv4 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol.
+ - One of I(cidrs) or I(ipv6_cidrs) must be specified.
+ type: list
+ elements: str
+ ipv6_cidrs:
+ description:
+ - The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are allowed to connect to the instance through the ports, and the protocol.
+ - One of I(cidrs) or I(ipv6_cidrs) must be specified.
+ type: list
+ elements: str
+ version_added: 6.0.0
key_pair_name:
description:
- Name of the key pair to use with the instance.
@@ -69,14 +98,13 @@ options:
type: int
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a new Lightsail instance
community.aws.lightsail:
state: present
@@ -87,6 +115,12 @@ EXAMPLES = '''
bundle_id: nano_1_0
key_pair_name: id_rsa
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
+ public_ports:
+ - from_port: 22
+ to_port: 22
+ protocol: "tcp"
+ cidrs: ["0.0.0.0/0"]
+ ipv6_cidrs: ["::/0"]
register: my_instance
- name: Delete an instance
@@ -94,10 +128,9 @@ EXAMPLES = '''
state: absent
region: us-east-1
name: my_instance
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
changed:
description: if an instance has been modified/created
returned: always
@@ -149,7 +182,7 @@ instance:
name: running
support_code: "123456789012/i-0997c97831ee21e33"
username: "ubuntu"
-'''
+"""
import time
@@ -160,22 +193,23 @@ except ImportError:
pass
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-def find_instance_info(module, client, instance_name, fail_if_not_found=False):
+def find_instance_info(module, client, instance_name, fail_if_not_found=False):
try:
res = client.get_instance(instanceName=instance_name)
- except is_boto3_error_code('NotFoundException') as e:
+ except is_boto3_error_code("NotFoundException") as e:
if fail_if_not_found:
module.fail_json_aws(e)
return None
except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
- return res['instance']
+ return res["instance"]
def wait_for_instance_state(module, client, instance_name, states):
@@ -183,53 +217,69 @@ def wait_for_instance_state(module, client, instance_name, states):
`states` is a list of instance states that we are waiting for.
"""
- wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = module.params.get("wait_timeout")
wait_max = time.time() + wait_timeout
while wait_max > time.time():
try:
instance = find_instance_info(module, client, instance_name)
- if instance['state']['name'] in states:
+ if instance["state"]["name"] in states:
break
time.sleep(5)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
else:
- module.fail_json(msg='Timed out waiting for instance "{0}" to get to one of the following states -'
- ' {1}'.format(instance_name, states))
+ module.fail_json(
+ msg=f'Timed out waiting for instance "{instance_name}" to get to one of the following states - {states}'
+ )
-def create_instance(module, client, instance_name):
+def update_public_ports(module, client, instance_name):
+ try:
+ client.put_instance_public_ports(
+ portInfos=snake_dict_to_camel_dict(module.params.get("public_ports")),
+ instanceName=instance_name,
+ )
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
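+
+# update_public_ports relies on snake_dict_to_camel_dict() to translate the
+# module's snake_case suboptions into the lowerCamelCase shape expected by
+# put_instance_public_ports(); a minimal sketch:
+#
+#   [{"from_port": 22, "to_port": 22, "protocol": "tcp", "cidrs": ["0.0.0.0/0"]}]
+#   becomes
+#   [{"fromPort": 22, "toPort": 22, "protocol": "tcp", "cidrs": ["0.0.0.0/0"]}]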
+
+def create_or_update_instance(module, client, instance_name):
inst = find_instance_info(module, client, instance_name)
- if inst:
- module.exit_json(changed=False, instance=camel_dict_to_snake_dict(inst))
- else:
- create_params = {'instanceNames': [instance_name],
- 'availabilityZone': module.params.get('zone'),
- 'blueprintId': module.params.get('blueprint_id'),
- 'bundleId': module.params.get('bundle_id'),
- 'userData': module.params.get('user_data')}
- key_pair_name = module.params.get('key_pair_name')
+ if not inst:
+ create_params = {
+ "instanceNames": [instance_name],
+ "availabilityZone": module.params.get("zone"),
+ "blueprintId": module.params.get("blueprint_id"),
+ "bundleId": module.params.get("bundle_id"),
+ "userData": module.params.get("user_data"),
+ }
+
+ key_pair_name = module.params.get("key_pair_name")
if key_pair_name:
- create_params['keyPairName'] = key_pair_name
+ create_params["keyPairName"] = key_pair_name
try:
client.create_instances(**create_params)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
- wait = module.params.get('wait')
+ wait = module.params.get("wait")
if wait:
- desired_states = ['running']
+ desired_states = ["running"]
wait_for_instance_state(module, client, instance_name, desired_states)
- inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
- module.exit_json(changed=True, instance=camel_dict_to_snake_dict(inst))
+ if module.params.get("public_ports") is not None:
+ update_public_ports(module, client, instance_name)
+ after_update_inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
+ module.exit_json(
+ changed=after_update_inst != inst,
+ instance=camel_dict_to_snake_dict(after_update_inst),
+ )
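+ # "changed" above is computed by comparing the full instance dict before and
+ # after the update, so an update that only adjusts ports is still reported.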
-def delete_instance(module, client, instance_name):
+def delete_instance(module, client, instance_name):
changed = False
inst = find_instance_info(module, client, instance_name)
@@ -237,7 +287,7 @@ def delete_instance(module, client, instance_name):
module.exit_json(changed=changed, instance={})
# Wait for instance to exit transition state before deleting
- desired_states = ['running', 'stopped']
+ desired_states = ["running", "stopped"]
wait_for_instance_state(module, client, instance_name, desired_states)
try:
@@ -278,13 +328,13 @@ def start_or_stop_instance(module, client, instance_name, state):
inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
# Wait for instance to exit transition state before state change
- desired_states = ['running', 'stopped']
+ desired_states = ["running", "stopped"]
wait_for_instance_state(module, client, instance_name, desired_states)
# Try state change
- if inst and inst['state']['name'] != state:
+ if inst and inst["state"]["name"] != state:
try:
- if state == 'running':
+ if state == "running":
client.start_instance(instanceName=instance_name)
else:
client.stop_instance(instanceName=instance_name)
@@ -294,7 +344,7 @@ def start_or_stop_instance(module, client, instance_name, state):
# Grab current instance info
inst = find_instance_info(module, client, instance_name)
- wait = module.params.get('wait')
+ wait = module.params.get("wait")
if wait:
desired_states = [state]
wait_for_instance_state(module, client, instance_name, desired_states)
@@ -304,37 +354,50 @@ def start_or_stop_instance(module, client, instance_name, state):
def main():
-
argument_spec = dict(
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted',
- 'rebooted']),
- zone=dict(type='str'),
- blueprint_id=dict(type='str'),
- bundle_id=dict(type='str'),
- key_pair_name=dict(type='str'),
- user_data=dict(type='str', default=''),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(default=300, type='int'),
+ name=dict(type="str", required=True),
+ state=dict(
+ type="str", default="present", choices=["present", "absent", "stopped", "running", "restarted", "rebooted"]
+ ),
+ zone=dict(type="str"),
+ blueprint_id=dict(type="str"),
+ bundle_id=dict(type="str"),
+ key_pair_name=dict(type="str"),
+ user_data=dict(type="str", default=""),
+ wait=dict(type="bool", default=True),
+ wait_timeout=dict(default=300, type="int"),
+ public_ports=dict(
+ type="list",
+ elements="dict",
+ options=dict(
+ from_port=dict(type="int", required=True),
+ to_port=dict(type="int", required=True),
+ protocol=dict(type="str", choices=["tcp", "all", "udp", "icmp"], required=True),
+ cidrs=dict(type="list", elements="str"),
+ ipv6_cidrs=dict(type="list", elements="str"),
+ ),
+ required_one_of=[("cidrs", "ipv6_cidrs")],
+ ),
)
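+ # Note: required_one_of above is scoped to the public_ports suboptions, so
+ # each public_ports entry must provide at least one of cidrs/ipv6_cidrs.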
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ('zone', 'blueprint_id', 'bundle_id')]])
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec, required_if=[["state", "present", ("zone", "blueprint_id", "bundle_id")]]
+ )
- client = module.client('lightsail')
+ client = module.client("lightsail")
- name = module.params.get('name')
- state = module.params.get('state')
+ name = module.params.get("name")
+ state = module.params.get("state")
- if state == 'present':
- create_instance(module, client, name)
- elif state == 'absent':
+ if state == "present":
+ create_or_update_instance(module, client, name)
+ elif state == "absent":
delete_instance(module, client, name)
- elif state in ('running', 'stopped'):
+ elif state in ("running", "stopped"):
start_or_stop_instance(module, client, name, state)
- elif state in ('restarted', 'rebooted'):
+ elif state in ("restarted", "rebooted"):
restart_instance(module, client, name)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/lightsail_snapshot.py b/ansible_collections/community/aws/plugins/modules/lightsail_snapshot.py
new file mode 100644
index 000000000..1d0d178aa
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/lightsail_snapshot.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: lightsail_snapshot
+version_added: "6.0.0"
+short_description: Creates snapshots of AWS Lightsail instances
+description:
+ - Creates snapshots of AWS Lightsail instances.
+author:
+ - "Nuno Saavedra (@Nfsaavedra)"
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ snapshot_name:
+ description: Name of the new instance snapshot.
+ required: true
+ type: str
+ instance_name:
+ description:
+ - Name of the instance to create the snapshot.
+ - Required when I(state=present).
+ type: str
+ wait:
+ description:
+ - Wait for the instance snapshot to be created before returning.
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - How long before I(wait) gives up, in seconds.
+ default: 300
+ type: int
+
+extends_documentation_fragment:
+- amazon.aws.common.modules
+- amazon.aws.region.modules
+- amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: Create AWS Lightsail snapshot
+ lightsail_snapshot:
+ region: us-east-1
+ snapshot_name: "my_instance_snapshot"
+ instance_name: "my_instance"
+
+- name: Delete AWS Lightsail snapshot
+ lightsail_snapshot:
+ region: us-east-1
+ snapshot_name: "my_instance_snapshot"
+ state: absent
+"""
+
+RETURN = r"""
+changed:
+ description: if a snapshot has been modified/created
+ returned: always
+ type: bool
+ sample:
+ changed: true
+snapshot:
+ description: instance snapshot data
+ type: dict
+ returned: always
+ sample:
+ arn: "arn:aws:lightsail:us-east-1:070807442430:InstanceSnapshot/54b0f785-7132-443d-9e32-95a6825636a4"
+ created_at: "2023-02-23T18:46:11.183000+00:00"
+ from_attached_disks: []
+ from_blueprint_id: "amazon_linux_2"
+ from_bundle_id: "nano_2_0"
+ from_instance_arn: "arn:aws:lightsail:us-east-1:070807442430:Instance/5ca1e7ca-a994-4e19-bb82-deb9d79e9ca3"
+ from_instance_name: "my_instance"
+ is_from_auto_snapshot: false
+ location:
+ availability_zone: "all"
+ region_name: "us-east-1"
+ name: "my_instance_snapshot"
+ resource_type: "InstanceSnapshot"
+ size_in_gb: 20
+ state: "available"
+ support_code: "351201681302/ami-06b48e5589f1e248b"
+ tags: []
+"""
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ # will be caught by AnsibleAWSModule
+ pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
+
+def find_instance_snapshot_info(module, client, instance_snapshot_name, fail_if_not_found=False):
+ try:
+ res = client.get_instance_snapshot(instanceSnapshotName=instance_snapshot_name)
+ except is_boto3_error_code("NotFoundException") as e:
+ if fail_if_not_found:
+ module.fail_json_aws(e)
+ return None
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+ return res["instanceSnapshot"]
+
+
+def wait_for_instance_snapshot(module, client, instance_snapshot_name):
+ wait_timeout = module.params.get("wait_timeout")
+ wait_max = time.time() + wait_timeout
+ snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name)
+
+ while wait_max > time.time():
+ snapshot = find_instance_snapshot_info(module, client, instance_snapshot_name)
+ current_state = snapshot["state"]
+ if current_state != "pending":
+ break
+ time.sleep(5)
+ else:
+ module.fail_json(msg=f'Timed out waiting for instance snapshot "{instance_snapshot_name}" to be created.')
+
+ return snapshot
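+
+# Polling sketch: an instance snapshot normally moves "pending" -> "available";
+# the loop above re-reads it every 5 seconds until it leaves "pending" or the
+# wait_timeout budget (default 300 seconds) is exhausted.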
+
+
+def create_snapshot(module, client):
+ snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name"))
+ new_instance = snapshot is None
+
+ if module.check_mode or not new_instance:
+ snapshot = snapshot if snapshot is not None else {}
+ module.exit_json(
+ changed=new_instance,
+ instance_snapshot=camel_dict_to_snake_dict(snapshot),
+ )
+
+ try:
+ snapshot = client.create_instance_snapshot(
+ instanceSnapshotName=module.params.get("snapshot_name"),
+ instanceName=module.params.get("instance_name"),
+ )
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ if module.params.get("wait"):
+ snapshot = wait_for_instance_snapshot(module, client, module.params.get("snapshot_name"))
+
+ module.exit_json(
+ changed=new_instance,
+ instance_snapshot=camel_dict_to_snake_dict(snapshot),
+ )
+
+
+def delete_snapshot(module, client):
+ snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name"))
+ if module.check_mode or snapshot is None:
+ changed = snapshot is not None
+ instance = snapshot if changed else {}
+ module.exit_json(changed=changed, instance=instance)
+
+ try:
+ client.delete_instance_snapshot(instanceSnapshotName=module.params.get("snapshot_name"))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(changed=True, instance=camel_dict_to_snake_dict(snapshot))
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ snapshot_name=dict(type="str", required=True),
+ instance_name=dict(type="str"),
+ wait=dict(type="bool", default=True),
+ wait_timeout=dict(default=300, type="int"),
+ )
+ required_if = [
+ ["state", "present", ("instance_name",)],
+ ]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
+ client = module.client("lightsail")
+
+ state = module.params.get("state")
+
+ if state == "present":
+ create_snapshot(module, client)
+ elif state == "absent":
+ delete_snapshot(module, client)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py b/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py
index 799ff629d..40d10a86b 100644
--- a/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py
+++ b/ansible_collections/community/aws/plugins/modules/lightsail_static_ip.py
@@ -1,14 +1,10 @@
#!/usr/bin/python
-
# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: lightsail_static_ip
version_added: 4.1.0
@@ -29,13 +25,13 @@ options:
required: true
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Provision a Lightsail static IP
community.aws.lightsail_static_ip:
state: present
@@ -46,9 +42,9 @@ EXAMPLES = '''
community.aws.lightsail_static_ip:
state: absent
name: my_static_ip
-'''
+"""
-RETURN = '''
+RETURN = r"""
static_ip:
description: static IP instance data
returned: always
@@ -64,7 +60,7 @@ static_ip:
name: "static_ip"
resource_type: StaticIp
support_code: "123456789012/192.0.2.5"
-'''
+"""
try:
import botocore
@@ -74,30 +70,29 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False):
+def find_static_ip_info(module, client, static_ip_name, fail_if_not_found=False):
try:
res = client.get_static_ip(staticIpName=static_ip_name)
- except is_boto3_error_code('NotFoundException') as e:
+ except is_boto3_error_code("NotFoundException") as e:
if fail_if_not_found:
module.fail_json_aws(e)
return None
except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
- return res['staticIp']
+ return res["staticIp"]
def create_static_ip(module, client, static_ip_name):
-
inst = find_static_ip_info(module, client, static_ip_name)
if inst:
module.exit_json(changed=False, static_ip=camel_dict_to_snake_dict(inst))
else:
- create_params = {'staticIpName': static_ip_name}
+ create_params = {"staticIpName": static_ip_name}
try:
client.allocate_static_ip(**create_params)
@@ -110,7 +105,6 @@ def create_static_ip(module, client, static_ip_name):
def delete_static_ip(module, client, static_ip_name):
-
inst = find_static_ip_info(module, client, static_ip_name)
if inst is None:
module.exit_json(changed=False, static_ip={})
@@ -126,24 +120,23 @@ def delete_static_ip(module, client, static_ip_name):
def main():
-
argument_spec = dict(
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
- client = module.client('lightsail')
+ client = module.client("lightsail")
- name = module.params.get('name')
- state = module.params.get('state')
+ name = module.params.get("name")
+ state = module.params.get("state")
- if state == 'present':
+ if state == "present":
create_static_ip(module, client, name)
- elif state == 'absent':
+ elif state == "absent":
delete_static_ip(module, client, name)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/mq_broker.py b/ansible_collections/community/aws/plugins/modules/mq_broker.py
new file mode 100644
index 000000000..5a97fda92
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/mq_broker.py
@@ -0,0 +1,628 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: mq_broker
+version_added: 6.0.0
+short_description: MQ broker management
+description:
+ - Create/update/delete a broker.
+ - Reboot a broker.
+author:
+ - FCO (@fotto)
+options:
+ broker_name:
+ description:
+ - The name of the MQ broker to work on.
+ type: str
+ required: true
+ state:
+ description:
+ - "C(present): Create/update broker."
+ - "C(absent): Delete broker."
+ - "C(restarted): Reboot broker."
+ choices: [ 'present', 'absent', 'restarted' ]
+ default: present
+ type: str
+ deployment_mode:
+ description:
+ - Set broker deployment type.
+ - Can be used only during creation.
+ - Defaults to C(SINGLE_INSTANCE).
+ choices: [ 'SINGLE_INSTANCE', 'ACTIVE_STANDBY_MULTI_AZ', 'CLUSTER_MULTI_AZ' ]
+ type: str
+ use_aws_owned_key:
+ description:
+ - Must be set to C(false) if I(kms_key_id) is provided as well.
+ - Can be used only during creation.
+ - Defaults to C(true).
+ type: bool
+ kms_key_id:
+ description:
+ - Use referenced key to encrypt broker data at rest.
+ - Can be used only during creation.
+ type: str
+ engine_type:
+ description:
+ - Set broker engine type.
+ - Can be used only during creation.
+ - Defaults to C(ACTIVEMQ).
+ choices: [ 'ACTIVEMQ', 'RABBITMQ' ]
+ type: str
+ maintenance_window_start_time:
+ description:
+ - Set maintenance window for automatic minor upgrades.
+ - Can be used only during creation.
+ - Not providing any value means "no maintenance window".
+ type: dict
+ publicly_accessible:
+ description:
+ - Allow/disallow public access.
+ - Can be used only during creation.
+ - Defaults to C(false).
+ type: bool
+ storage_type:
+ description:
+ - Set underlying storage type.
+ - Can be used only during creation.
+ - Defaults to C(EFS).
+ choices: [ 'EBS', 'EFS' ]
+ type: str
+ subnet_ids:
+ description:
+ - Defines where to deploy broker instances.
+ - Minimum required number depends on deployment type.
+ - Can be used only during creation.
+ type: list
+ elements: str
+ users:
+ description:
+ - This parameter allows you to use a custom set of initial user(s).
+ - M(community.aws.mq_user) is the preferred way to manage (local) users;
+ however, a broker cannot be created without any user.
+ - If nothing is specified, a default C(admin) user will be created along with brokers.
+ - Can be used only during creation. Use M(community.aws.mq_user) module for updates.
+ type: list
+ elements: dict
+ tags:
+ description:
+ - Tag newly created brokers.
+ - Can be used only during creation.
+ type: dict
+ authentication_strategy:
+ description: Choose between locally and remotely managed users.
+ choices: [ 'SIMPLE', 'LDAP' ]
+ type: str
+ auto_minor_version_upgrade:
+ description: Allow/disallow automatic minor version upgrades.
+ type: bool
+ default: true
+ engine_version:
+ description:
+ - Set engine version of broker.
+ - The special value C(latest) will pick the latest available version.
+ - The special value C(latest) is ignored on update.
+ type: str
+ host_instance_type:
+ description: Instance type of broker instances.
+ type: str
+ enable_audit_log:
+ description: Enable/disable to push audit logs to AWS CloudWatch.
+ type: bool
+ default: false
+ enable_general_log:
+ description: Enable/disable to push general logs to AWS CloudWatch.
+ type: bool
+ default: false
+ security_groups:
+ description:
+ - Associate security groups with broker.
+ - At least one must be provided during creation.
+ type: list
+ elements: str
+ wait:
+ description:
+ - Specifies whether the module waits for the desired C(state).
+ - The time to wait can be controlled by setting I(wait_timeout).
+ type: bool
+ default: false
+ version_added: 7.1.0
+ wait_timeout:
+ description:
+ - How long to wait (in seconds) for the broker to reach the desired state if I(wait=true).
+ default: 900
+ type: int
+ version_added: 7.1.0
+
+extends_documentation_fragment:
+ - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+"""
+
+
+EXAMPLES = r"""
+- name: create broker (if missing) with minimal required parameters
+ community.aws.mq_broker:
+ broker_name: "{{ broker_name }}"
+ security_groups:
+ - sg_xxxxxxx
+ subnet_ids:
+ - subnet_xxx
+ - subnet_yyy
+ register: result
+
+- set_fact:
+ broker_id: "{{ result.broker['BrokerId'] }}"
+
+- name: use mq_broker_info to wait until broker is ready
+ community.aws.mq_broker_info:
+ broker_id: "{{ broker_id }}"
+ register: result
+ until: "result.broker['BrokerState'] == 'RUNNING'"
+ retries: 15
+ delay: 60
+
+- name: create or update broker with almost all parameter set including credentials
+ community.aws.mq_broker:
+ broker_name: "my_broker_2"
+ state: present
+ deployment_mode: 'ACTIVE_STANDBY_MULTI_AZ'
+ use_aws_owned_key: false
+ kms_key_id: 'my-precreated-key-id'
+ engine_type: 'ACTIVEMQ'
+ maintenance_window_start_time:
+ DayOfWeek: 'MONDAY'
+ TimeOfDay: '03:15'
+ TimeZone: 'Europe/Berlin'
+ publicly_accessible: true
+ storage_type: 'EFS'
+ security_groups:
+ - sg_xxxxxxx
+ subnet_ids:
+ - subnet_xxx
+ - subnet_yyy
+ users:
+ - Username: 'initial-user'
+ Password: 'plain-text-password'
+ ConsoleAccess: true
+ tags:
+ env: Test
+ creator: ansible
+ authentication_strategy: 'SIMPLE'
+ auto_minor_version_upgrade: true
+ engine_version: "5.15.13"
+ host_instance_type: 'mq.t3.micro'
+ enable_audit_log: true
+ enable_general_log: true
+
+- name: reboot a broker
+ community.aws.mq_broker:
+ broker_name: "my_broker_2"
+ state: restarted
+
+- name: delete a broker
+ community.aws.mq_broker:
+ broker_name: "my_broker_2"
+ state: absent
+"""
+
+RETURN = r"""
+broker:
+ description:
+ - "All API responses are converted to snake yaml except 'Tags'"
+ - "'state=present': API response of create_broker() or update_broker() call"
+ - "'state=absent': result of describe_broker() call before delete_broker() is triggerd"
+ - "'state=restarted': result of describe_broker() after reboot has been triggered"
+ type: dict
+ returned: success
+"""
+
+try:
+ import botocore
+except ImportError:
+ # handled by AnsibleAWSModule
+ pass
+
+from time import sleep
+from time import time
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+
+PARAMS_MAP = {
+ "authentication_strategy": "AuthenticationStrategy",
+ "auto_minor_version_upgrade": "AutoMinorVersionUpgrade",
+ "broker_name": "BrokerName",
+ "deployment_mode": "DeploymentMode",
+ "use_aws_owned_key": "EncryptionOptions/UseAwsOwnedKey",
+ "kms_key_id": "EncryptionOptions/KmsKeyId",
+ "engine_type": "EngineType",
+ "engine_version": "EngineVersion",
+ "host_instance_type": "HostInstanceType",
+ "enable_audit_log": "Logs/Audit",
+ "enable_general_log": "Logs/General",
+ "maintenance_window_start_time": "MaintenanceWindowStartTime",
+ "publicly_accessible": "PubliclyAccessible",
+ "security_groups": "SecurityGroups",
+ "storage_type": "StorageType",
+ "subnet_ids": "SubnetIds",
+ "users": "Users",
+ "tags": "Tags",
+}
+
+
+DEFAULTS = {
+ "authentication_strategy": "SIMPLE",
+ "auto_minor_version_upgrade": False,
+ "deployment_mode": "SINGLE_INSTANCE",
+ "use_aws_owned_key": True,
+ "engine_type": "ACTIVEMQ",
+ "engine_version": "latest",
+ "host_instance_type": "mq.t3.micro",
+ "enable_audit_log": False,
+ "enable_general_log": False,
+ "publicly_accessible": False,
+ "storage_type": "EFS",
+}
+
+CREATE_ONLY_PARAMS = [
+ "deployment_mode",
+ "use_aws_owned_key",
+ "kms_key_id",
+ "engine_type",
+ "maintenance_window_start_time",
+ "publicly_accessible",
+ "storage_type",
+ "subnet_ids",
+ "users",
+ "tags",
+]
+
+
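+# _set_kwarg() expands the "A/B" paths used in PARAMS_MAP into nested dicts;
+# a minimal example of the mapping it produces:
+#
+#   kwargs = {}
+#   _set_kwarg(kwargs, "use_aws_owned_key", False)
+#   # kwargs == {"EncryptionOptions": {"UseAwsOwnedKey": False}}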
+def _set_kwarg(kwargs, key, value):
+ mapped_key = PARAMS_MAP[key]
+ if "/" in mapped_key:
+ key_list = mapped_key.split("/")
+ key_list.reverse()
+ else:
+ key_list = [mapped_key]
+ data = kwargs
+ while len(key_list) > 1:
+ this_key = key_list.pop()
+ if this_key not in data:
+ data[this_key] = {}
+ #
+ data = data[this_key]
+ data[key_list[0]] = value
+
+
+def _fill_kwargs(module, apply_defaults=True, ignore_create_params=False):
+ kwargs = {}
+ if apply_defaults:
+ for p_name, p_value in DEFAULTS.items():
+ _set_kwarg(kwargs, p_name, p_value)
+ for p_name in module.params:
+ if ignore_create_params and p_name in CREATE_ONLY_PARAMS:
+ # silently ignore CREATE_ONLY_PARAMS on update to
+ # make playbooks idempotent
+ continue
+ if p_name in PARAMS_MAP and module.params[p_name] is not None:
+ _set_kwarg(kwargs, p_name, module.params[p_name])
+ else:
+ # ignore
+ pass
+ return kwargs
+
+
+def __list_needs_change(current, desired):
+ if len(current) != len(desired):
+ return True
+ # equal length:
+ c_sorted = sorted(current)
+ d_sorted = sorted(desired)
+ for index, value in enumerate(c_sorted):
+ if value != d_sorted[index]:
+ return True
+ #
+ return False
+
+
+def __dict_needs_change(current, desired):
+ # values contained in 'current' but not specified in 'desired' are ignored
+ # value contained in 'desired' but not in 'current' (unsupported attributes) are ignored
+ for key in desired:
+ if key in current:
+ if desired[key] != current[key]:
+ return True
+ #
+ return False
+
+
+def _needs_change(current, desired):
+ needs_change = False
+ for key in desired:
+ current_value = current[key]
+ desired_value = desired[key]
+ if isinstance(current_value, (int, str, bool)):
+ if current_value != desired_value:
+ needs_change = True
+ break
+ elif isinstance(current_value, list):
+ # assumption: all 'list' type settings we allow changes for have scalar values
+ if __list_needs_change(current_value, desired_value):
+ needs_change = True
+ break
+ elif isinstance(current_value, dict):
+ # assumption: all 'dict' type settings we allow changes for have scalar values
+ if __dict_needs_change(current_value, desired_value):
+ needs_change = True
+ break
+ else:
+ # unexpected type
+ needs_change = True
+ break
+ #
+ return needs_change
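+
+# Example: with current = {"HostInstanceType": "mq.t3.micro"} and
+# desired = {"HostInstanceType": "mq.m5.large"}, _needs_change() returns True;
+# keys present only in "current" are never inspected.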
+
+
+def get_latest_engine_version(conn, module, engine_type):
+ try:
+ response = conn.describe_broker_engine_types(EngineType=engine_type)
+ return response["BrokerEngineTypes"][0]["EngineVersions"][0]["Name"]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list engine versions")
+
+
+def get_broker_id(conn, module):
+ try:
+ broker_name = module.params["broker_name"]
+ broker_id = None
+ response = conn.list_brokers(MaxResults=100)
+ for broker in response["BrokerSummaries"]:
+ if broker["BrokerName"] == broker_name:
+ broker_id = broker["BrokerId"]
+ break
+ return broker_id
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list broker brokers.")
+
+
+def get_broker_info(conn, module, broker_id):
+ try:
+ return conn.describe_broker(BrokerId=broker_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get broker details.")
+
+
+def wait_for_status(conn, module):
+ interval_secs = 5
+ timeout = module.params.get("wait_timeout", 900)
+ broker_name = module.params.get("broker_name")
+ desired_state = module.params.get("state")
+ done = False
+
+ paginator = conn.get_paginator("list_brokers")
+ page_iterator = paginator.paginate(PaginationConfig={"MaxItems": 100, "PageSize": 100, "StartingToken": ""})
+ wait_timeout = time() + timeout
+
+ while wait_timeout > time():
+ try:
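+ # page_iterator.search() applies a JMESPath expression across all pages and
+ # yields only the broker summaries whose BrokerName matches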
+ filtered_iterator = page_iterator.search(f"BrokerSummaries[?BrokerName == `{broker_name}`][]")
+ broker_list = list(filtered_iterator)
+
+ if module.check_mode:
+ return
+
+ if len(broker_list) < 1 and desired_state == "absent":
+ done = True
+ break
+
+ if desired_state in ["present", "rebooted"] and broker_list[0]["BrokerState"] == "RUNNING":
+ done = True
+ break
+
+ if broker_list[0]["BrokerState"] == "CREATION_FAILED":
+ break
+
+ sleep(interval_secs)
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't paginate brokers.")
+
+ if not done:
+ module.fail_json(msg="desired state not reached")
+
+
+def reboot_broker(conn, module, broker_id):
+ wait = module.params.get("wait")
+
+ try:
+ response = conn.reboot_broker(BrokerId=broker_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't reboot broker.")
+
+ if wait:
+ wait_for_status(conn, module)
+
+ return response
+
+
+def delete_broker(conn, module, broker_id):
+ wait = module.params.get("wait")
+
+ try:
+ response = conn.delete_broker(BrokerId=broker_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete broker.")
+
+ if wait:
+ wait_for_status(conn, module)
+
+ return response
+
+
+def create_broker(conn, module):
+ kwargs = _fill_kwargs(module)
+ wait = module.params.get("wait")
+
+ if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest":
+ kwargs["EngineVersion"] = get_latest_engine_version(conn, module, kwargs["EngineType"])
+ if kwargs["AuthenticationStrategy"] == "LDAP":
+ module.fail_json(msg="'AuthenticationStrategy=LDAP' not supported, yet")
+ if "Users" not in kwargs:
+ # add a default admin user (a broker cannot be created without any users)
+ kwargs["Users"] = [{"Username": "admin", "Password": "adminPassword", "ConsoleAccess": True, "Groups": []}]
+ if "EncryptionOptions" in kwargs and "UseAwsOwnedKey" in kwargs["EncryptionOptions"]:
+ kwargs["EncryptionOptions"]["UseAwsOwnedKey"] = False
+ #
+ if "SecurityGroups" not in kwargs or len(kwargs["SecurityGroups"]) == 0:
+ module.fail_json(msg="At least one security group must be specified on broker creation")
+ #
+ changed = True
+ result = conn.create_broker(**kwargs)
+ #
+ if wait:
+ wait_for_status(conn, module)
+
+ return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": changed}
+
+
+def update_broker(conn, module, broker_id):
+ kwargs = _fill_kwargs(module, apply_defaults=False, ignore_create_params=True)
+ wait = module.params.get("wait")
+ # replace name with id
+ broker_name = kwargs["BrokerName"]
+ del kwargs["BrokerName"]
+ kwargs["BrokerId"] = broker_id
+ # get current state for comparison:
+ api_result = get_broker_info(conn, module, broker_id)
+ if api_result["BrokerState"] != "RUNNING":
+ module.fail_json(
+ msg=f"Cannot trigger update while broker ({broker_id}) is in state {api_result['BrokerState']}",
+ )
+ # engine version of 'latest' is taken as "keep current one"
+ # i.e. do not request upgrade on playbook rerun
+ if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest":
+ kwargs["EngineVersion"] = api_result["EngineVersion"]
+ result = {"broker_id": broker_id, "broker_name": broker_name}
+ changed = False
+ if _needs_change(api_result, kwargs):
+ changed = True
+ if not module.check_mode:
+ api_result = conn.update_broker(**kwargs)
+ #
+ #
+ if wait:
+ wait_for_status(conn, module)
+
+ return {"broker": result, "changed": changed}
+
+
+def ensure_absent(conn, module):
+ result = {"broker_name": module.params["broker_name"], "broker_id": None}
+ if module.check_mode:
+ return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": True}
+ broker_id = get_broker_id(conn, module)
+ result["broker_id"] = broker_id
+
+ if not broker_id:
+ # silently ignore delete of unknown broker (to make it idempotent)
+ return {"broker": result, "changed": False}
+
+ try:
+ # check for pending delete (small race condition possible here)
+ api_result = get_broker_info(conn, module, broker_id)
+ if api_result["BrokerState"] == "DELETION_IN_PROGRESS":
+ return {"broker": result, "changed": False}
+ delete_broker(conn, module, broker_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ return {"broker": result, "changed": True}
+
+
+def ensure_present(conn, module):
+ if module.check_mode:
+ return {"broker": {"broker_arn": "fakeArn", "broker_id": "fakeId"}, "changed": True}
+
+ broker_id = get_broker_id(conn, module)
+ if broker_id:
+ return update_broker(conn, module, broker_id)
+
+ return create_broker(conn, module)
+
+
+def main():
+ argument_spec = dict(
+ broker_name=dict(required=True, type="str"),
+ state=dict(default="present", choices=["present", "absent", "restarted"]),
+ wait=dict(default=False, type="bool"),
+ wait_timeout=dict(default=900, type="int"),
+ # parameters only allowed on create
+ deployment_mode=dict(choices=["SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ", "CLUSTER_MULTI_AZ"]),
+ use_aws_owned_key=dict(type="bool"),
+ kms_key_id=dict(type="str"),
+ engine_type=dict(choices=["ACTIVEMQ", "RABBITMQ"], type="str"),
+ maintenance_window_start_time=dict(type="dict"),
+ publicly_accessible=dict(type="bool"),
+ storage_type=dict(choices=["EBS", "EFS"]),
+ subnet_ids=dict(type="list", elements="str"),
+ users=dict(type="list", elements="dict"),
+ tags=dict(type="dict"),
+ # parameters allowed on update as well
+ authentication_strategy=dict(choices=["SIMPLE", "LDAP"]),
+ auto_minor_version_upgrade=dict(default=True, type="bool"),
+ engine_version=dict(type="str"),
+ host_instance_type=dict(type="str"),
+ enable_audit_log=dict(default=False, type="bool"),
+ enable_general_log=dict(default=False, type="bool"),
+ security_groups=dict(type="list", elements="str"),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client("mq")
+
+ if module.params["state"] == "present":
+ try:
+ compound_result = ensure_present(connection, module)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ #
+ module.exit_json(**compound_result)
+
+ if module.params["state"] == "absent":
+ try:
+ compound_result = ensure_absent(connection, module)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ #
+ module.exit_json(**compound_result)
+
+ if module.params["state"] == "restarted":
+ broker_id = get_broker_id(connection, module)
+ if module.check_mode:
+ module.exit_json(broker={"broker_id": broker_id if broker_id else "fakeId"}, changed=True)
+ if not broker_id:
+ module.fail_json(
+ msg="Cannot find broker with name {module.params['broker_name']}.",
+ )
+ try:
+ changed = True
+ if not module.check_mode:
+ reboot_broker(connection, module, broker_id)
+ #
+ result = get_broker_info(connection, module, broker_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ module.exit_json(broker=result, changed=changed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/mq_broker_config.py b/ansible_collections/community/aws/plugins/modules/mq_broker_config.py
new file mode 100644
index 000000000..781bbb7d5
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/mq_broker_config.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: mq_broker_config
+version_added: 6.0.0
+short_description: Update Amazon MQ broker configuration
+description:
+ - Update configuration for an MQ broker.
+ - If the new configuration differs from the current one, a new configuration
+ revision is created and assigned to the broker.
+ - Optionally reboots the broker to make the changes effective immediately.
+author:
+ - FCO (@fotto)
+options:
+ broker_id:
+ description:
+ - The ID of the MQ broker to work on.
+ type: str
+ required: true
+ config_xml:
+ description:
+ - The broker configuration in XML format.
+ type: str
+ required: true
+ config_description:
+ description:
+ - Description to set on new configuration revision.
+ type: str
+ reboot:
+ description:
+ - Reboot broker after new config has been applied.
+ type: bool
+ default: false
+extends_documentation_fragment:
+ - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+"""
+
+EXAMPLES = r"""
+- name: send new XML config to broker relying on credentials from environment
+ community.aws.mq_broker_config:
+ broker_id: "aws-mq-broker-id"
+ config_xml: "{{ lookup('file', 'activemq.xml' )}}"
+ region: "{{ aws_region }}"
+
+- name: send new XML config to broker and reboot if necessary
+ community.aws.mq_broker_config:
+ broker_id: "aws-mq-broker-id"
+ config_xml: "{{ lookup('file', 'activemq2.xml' )}}"
+ reboot: true
+
+- name: send new broker config with a custom description
+ community.aws.mq_broker_config:
+ broker_id: "{{ broker_id }}"
+ config_xml: "{{ lookup('file', 'activemq3.xml') }}"
+ config_description: "custom description for configuration object"
+ register: result
+"""
+
+RETURN = r"""
+broker:
+ description: API response of describe_broker() converted to snake case after changes have been applied.
+ type: dict
+ returned: success
+configuration:
+ description: Details about new configuration object.
+ returned: I(changed=true)
+ type: complex
+ contains:
+ id:
+ description: Configuration ID of broker configuration.
+ type: str
+ example: c-386541b8-3139-42c2-9c2c-a4c267c1714f
+ revision:
+ description: Revision of the configuration that will be active after next reboot.
+ type: int
+ example: 4
+"""
+
+import base64
+import re
+
+try:
+ import botocore
+except ImportError:
+ # handled by AnsibleAWSModule
+ pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+
+DEFAULTS = {"reboot": False}
+FULL_DEBUG = False
+
+
+def is_same_config(old, new):
+ # we use a simple comparison here: collapse whitespace and compare the rest
+ # TODO: use same XML normalizer on new as used by AWS before comparing strings
+ old_stripped = re.sub(r"\s+", " ", old, flags=re.S).strip()
+ new_stripped = re.sub(r"\s+", " ", new, flags=re.S).strip()
+ return old_stripped == new_stripped
+
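+# Illustration (assumed XML): whitespace differences are ignored, so
+#   is_same_config("<broker>\n  <plugins/>\n</broker>", "<broker> <plugins/> </broker>")
+# returns True; any textual change inside the XML returns False.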
+
+def get_broker_info(conn, module):
+ try:
+ return conn.describe_broker(BrokerId=module.params["broker_id"])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if module.check_mode:
+ return {
+ "broker_id": module.params["broker_id"],
+ }
+ module.fail_json_aws(e, msg="Couldn't get broker details.")
+
+
+def get_current_configuration(conn, module, cfg_id, cfg_revision):
+ try:
+ return conn.describe_configuration_revision(ConfigurationId=cfg_id, ConfigurationRevision=str(cfg_revision))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get configuration revision.")
+
+
+def create_and_assign_config(conn, module, broker_id, cfg_id, cfg_xml_encoded):
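+ # two-step flow: update_configuration() creates a new revision of the existing
+ # configuration object, update_broker() then points the broker at that revision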
+ kwargs = {"ConfigurationId": cfg_id, "Data": cfg_xml_encoded}
+ if "config_description" in module.params and module.params["config_description"]:
+ kwargs["Description"] = module.params["config_description"]
+ else:
+ kwargs["Description"] = "Updated through community.aws.mq_broker_config ansible module"
+ #
+ try:
+ c_response = conn.update_configuration(**kwargs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create new configuration revision.")
+ #
+ new_config_revision = c_response["LatestRevision"]["Revision"]
+ try:
+ b_response = conn.update_broker(
+ BrokerId=broker_id, Configuration={"Id": cfg_id, "Revision": new_config_revision}
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't assign new configuration revision to broker.")
+ #
+ return (c_response, b_response)
+
+
+def reboot_broker(conn, module, broker_id):
+ try:
+ return conn.reboot_broker(BrokerId=broker_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't reboot broker.")
+
+
+def ensure_config(conn, module):
+ broker_id = module.params["broker_id"]
+ broker_info = get_broker_info(conn, module)
+ changed = False
+ if module.check_mode and "Configurations" not in broker_info:
+ # no result from get_broker_info(); use the requested config
+ current_cfg_decoded = module.params["config_xml"]
+ else:
+ current_cfg = broker_info["Configurations"]["Current"]
+ if "Pending" in broker_info["Configurations"]:
+ current_cfg = broker_info["Configurations"]["Pending"]
+ current_cfg_encoded = get_current_configuration(conn, module, current_cfg["Id"], current_cfg["Revision"])[
+ "Data"
+ ]
+ current_cfg_decoded = base64.b64decode(current_cfg_encoded.encode()).decode()
+
+ if is_same_config(current_cfg_decoded, module.params["config_xml"]):
+ return {"changed": changed, "broker": camel_dict_to_snake_dict(broker_info, ignore_list=["Tags"])}
+
+ (c_response, _b_response) = (None, None)
+ if not module.check_mode:
+ new_cfg_encoded = base64.b64encode(module.params["config_xml"].encode()).decode()
+ (c_response, _b_response) = create_and_assign_config(
+ conn, module, broker_id, current_cfg["Id"], new_cfg_encoded
+ )
+ #
+ changed = True
+
+ if changed and module.params["reboot"] and not module.check_mode:
+ reboot_broker(conn, module, broker_id)
+ #
+ broker_info = get_broker_info(conn, module)
+ return_struct = {
+ "changed": changed,
+ "broker": camel_dict_to_snake_dict(broker_info, ignore_list=["Tags"]),
+ }
+ if c_response:
+ # only available when a new revision has actually been created (not in check mode)
+ return_struct["configuration"] = {"id": c_response["Id"], "revision": c_response["LatestRevision"]["Revision"]}
+ if FULL_DEBUG:
+ return_struct["old_config_xml"] = base64.b64decode(current_cfg_encoded)
+ return_struct["new_config_xml"] = module.params["config_xml"]
+ return_struct["old_config_revision"] = current_cfg["Revision"]
+ return return_struct
+
+
+def main():
+ argument_spec = dict(
+ broker_id=dict(required=True, type="str"),
+ config_xml=dict(required=True, type="str"),
+ config_description=dict(required=False, type="str"),
+ reboot=dict(required=False, type="bool", default=DEFAULTS["reboot"]),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client("mq")
+
+ try:
+ result = ensure_config(connection, module)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/mq_broker_info.py b/ansible_collections/community/aws/plugins/modules/mq_broker_info.py
new file mode 100644
index 000000000..e760e0179
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/mq_broker_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: mq_broker_info
+version_added: 6.0.0
+short_description: Retrieve MQ Broker details
+description:
+ - Get details about a broker.
+author:
+ - FCO (@fotto)
+options:
+ broker_id:
+ description: Get details for broker with specified ID.
+ type: str
+ broker_name:
+ description:
+ - Get details for the broker with the specified name.
+ - Ignored if I(broker_id) is specified.
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+"""
+
+
+EXAMPLES = r"""
+- name: get current broker settings by id
+ community.aws.mq_broker_info:
+ broker_id: "aws-mq-broker-id"
+ register: broker_info
+
+- name: get current broker settings by name
+ community.aws.mq_broker_info:
+ broker_name: "aws-mq-broker-name"
+ register: broker_info
+"""
+
+RETURN = r"""
+broker:
+ description: API response of describe_broker() converted to snake case.
+ type: dict
+ returned: success
+"""
+
+try:
+ import botocore
+except ImportError:
+ # handled by AnsibleAWSModule
+ pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+
+
+def get_broker_id(conn, module):
+ try:
+ broker_name = module.params["broker_name"]
+ broker_id = None
+ response = conn.list_brokers(MaxResults=100)
+ for broker in response["BrokerSummaries"]:
+ if broker["BrokerName"] == broker_name:
+ broker_id = broker["BrokerId"]
+ break
+ return broker_id
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't list broker brokers.")
+
+
+def get_broker_info(conn, module, broker_id):
+ try:
+ return conn.describe_broker(BrokerId=broker_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if module.check_mode:
+ module.exit_json(broker={"broker_id": broker_id, "broker_name": "fakeName"})
+ else:
+ module.fail_json_aws(e, msg="Couldn't get broker details.")
+
+
+def main():
+ argument_spec = dict(broker_id=dict(type="str"), broker_name=dict(type="str"))
+ required_one_of = (
+ (
+ "broker_name",
+ "broker_id",
+ ),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_one_of=required_one_of,
+ supports_check_mode=True,
+ )
+ broker_id = module.params["broker_id"]
+ broker_name = module.params["broker_name"]
+
+ connection = module.client("mq")
+
+ try:
+ if not broker_id:
+ broker_id = get_broker_id(connection, module)
+ if not broker_id:
+ if module.check_mode:
+ module.exit_json(
+ broker={"broker_id": "fakeId", "broker_name": broker_name if broker_name else "fakeName"}
+ )
+ module.fail_json(msg=f"Can't find broker with name {broker_name}.")
+ result = get_broker_info(connection, module, broker_id)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+ #
+ module.exit_json(broker=camel_dict_to_snake_dict(result, ignore_list=["Tags"]))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/mq_user.py b/ansible_collections/community/aws/plugins/modules/mq_user.py
new file mode 100644
index 000000000..68e1fd629
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/mq_user.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: mq_user
+version_added: 6.0.0
+short_description: Manage users in existing Amazon MQ broker
+description:
+ - Manage Amazon MQ users.
+ - Pending changes are taken into account for idempotency.
+author:
+ - FCO (@fotto)
+options:
+ broker_id:
+ description:
+ - The ID of the MQ broker to work on.
+ type: str
+ required: true
+ username:
+ description:
+ - The name of the user to create/update/delete.
+ type: str
+ required: true
+ state:
+ description:
+ - Create/Update vs Delete of user.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ console_access:
+ description:
+ - Whether the user can access the MQ Console.
+ - Defaults to C(false) on creation.
+ type: bool
+ groups:
+ description:
+ - Set group memberships for user.
+ - Defaults to C([]) on creation.
+ type: list
+ elements: str
+ password:
+ description:
+ - Set password for user.
+ - Defaults to a random password on creation.
+ - Ignored unless I(allow_pw_update=true).
+ type: str
+ allow_pw_update:
+ description:
+ - When I(allow_pw_update=true) and I(password) is set, the password
+ will always be updated for the user.
+ default: false
+ type: bool
+extends_documentation_fragment:
+ - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+"""
+
+EXAMPLES = r"""
+- name: create/update user - set provided password if user doesn't exist yet
+ community.aws.mq_user:
+ state: present
+ broker_id: "aws-mq-broker-id"
+ username: "sample_user1"
+ console_access: false
+ groups: ["g1", "g2"]
+ password: "plain-text-password"
+
+- name: allow console access and update group list - relying on default state
+ community.aws.mq_user:
+ broker_id: "aws-mq-broker-id"
+ username: "sample_user1"
+ region: "{{ aws_region }}"
+ console_access: true
+ groups: ["g1", "g2", "g3"]
+
+- name: remove user
+ community.aws.mq_user:
+ state: absent
+ broker_id: "aws-mq-broker-id"
+ username: "other_user"
+"""
+
+RETURN = r"""
+user:
+ description:
+ - Echoes the username.
+ - Only present when I(state=present).
+ type: str
+ returned: success
+"""
+
+import secrets
+
+try:
+ import botocore
+except ImportError:
+ # handled by AnsibleAWSModule
+ pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+
+CREATE_DEFAULTS = {
+ "console_access": False,
+ "groups": [],
+}
+
+
+def _group_change_required(user_response, requested_groups):
+ current_groups = []
+ if "Groups" in user_response:
+ current_groups = user_response["Groups"]
+ elif "Pending" in user_response:
+ # to support automatic testing without broker reboot
+ current_groups = user_response["Pending"]["Groups"]
+ if len(current_groups) != len(requested_groups):
+ return True
+ if len(current_groups) != len(set(current_groups) & set(requested_groups)):
+ return True
+ #
+ return False
+
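+# Illustration (assumed group names): the check is order-insensitive, e.g.
+#   current ["g1", "g2"] vs requested ["g2", "g1"] -> no change required
+#   current ["g1"]       vs requested ["g1", "g2"] -> change required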
+
+def _console_access_change_required(user_response, requested_boolean):
+ current_boolean = CREATE_DEFAULTS["console_access"]
+ if "ConsoleAccess" in user_response:
+ current_boolean = user_response["ConsoleAccess"]
+ elif "Pending" in user_response:
+ # to support automatic testing without broker reboot
+ current_boolean = user_response["Pending"]["ConsoleAccess"]
+ #
+ return current_boolean != requested_boolean
+
+
+def generate_password():
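+ # secrets.token_hex(20) returns a 40-character hexadecimal string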
+ return secrets.token_hex(20)
+
+
+# returns API response object
+def _create_user(conn, module):
+ kwargs = {"BrokerId": module.params["broker_id"], "Username": module.params["username"]}
+ if "groups" in module.params and module.params["groups"] is not None:
+ kwargs["Groups"] = module.params["groups"]
+ else:
+ kwargs["Groups"] = CREATE_DEFAULTS["groups"]
+ if "password" in module.params and module.params["password"]:
+ kwargs["Password"] = module.params["password"]
+ else:
+ kwargs["Password"] = generate_password()
+ if "console_access" in module.params and module.params["console_access"] is not None:
+ kwargs["ConsoleAccess"] = module.params["console_access"]
+ else:
+ kwargs["ConsoleAccess"] = CREATE_DEFAULTS["console_access"]
+ try:
+ response = conn.create_user(**kwargs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create user")
+ return response
+
+
+# returns API response object
+def _update_user(conn, module, kwargs):
+ try:
+ response = conn.update_user(**kwargs)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't update user")
+ return response
+
+
+def get_matching_user(conn, module, broker_id, username):
+ try:
+ response = conn.describe_user(BrokerId=broker_id, Username=username)
+ except is_boto3_error_code("NotFoundException"):
+ return None
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get user details")
+ return response
+
+
+def ensure_user_present(conn, module):
+ user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"])
+ changed = False
+
+ if user is None:
+ if not module.check_mode:
+ _response = _create_user(conn, module)
+ changed = True
+ else:
+ kwargs = {}
+ if "groups" in module.params and module.params["groups"] is not None:
+ if _group_change_required(user, module.params["groups"]):
+ kwargs["Groups"] = module.params["groups"]
+ if "console_access" in module.params and module.params["console_access"] is not None:
+ if _console_access_change_required(user, module.params["console_access"]):
+ kwargs["ConsoleAccess"] = module.params["console_access"]
+ if "password" in module.params and module.params["password"]:
+ if "allow_pw_update" in module.params and module.params["allow_pw_update"]:
+ kwargs["Password"] = module.params["password"]
+ if len(kwargs) == 0:
+ changed = False
+ else:
+ if not module.check_mode:
+ kwargs["BrokerId"] = module.params["broker_id"]
+ kwargs["Username"] = module.params["username"]
+ response = _update_user(conn, module, kwargs)
+ #
+ changed = True
+ #
+ user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"])
+
+ return {"changed": changed, "user": camel_dict_to_snake_dict(user, ignore_list=["Tags"])}
+
+
+def ensure_user_absent(conn, module):
+ user = get_matching_user(conn, module, module.params["broker_id"], module.params["username"])
+ result = {"changed": False}
+ if user is None:
+ return result
+ # better support for testing
+ if "Pending" in user and "PendingChange" in user["Pending"] and user["Pending"]["PendingChange"] == "DELETE":
+ return result
+
+ result = {"changed": True}
+ if module.check_mode:
+ return result
+
+ try:
+ conn.delete_user(BrokerId=user["BrokerId"], Username=user["Username"])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete user")
+
+ return result
+
+
+def main():
+ argument_spec = dict(
+ broker_id=dict(required=True, type="str"),
+ username=dict(required=True, type="str"),
+ console_access=dict(required=False, type="bool"),
+ groups=dict(required=False, type="list", elements="str"),
+ password=dict(required=False, type="str", no_log=True),
+ allow_pw_update=dict(default=False, required=False, type="bool"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client("mq")
+
+ state = module.params.get("state")
+
+ try:
+ if state == "present":
+ result = ensure_user_present(connection, module)
+ elif state == "absent":
+ result = ensure_user_absent(connection, module)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/mq_user_info.py b/ansible_collections/community/aws/plugins/modules/mq_user_info.py
new file mode 100644
index 000000000..64cf92da7
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/mq_user_info.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: mq_user_info
+version_added: 6.0.0
+short_description: List users of an Amazon MQ broker
+description:
+ - List users for the specified broker ID.
+ - Pending creations and deletions can be skipped by options.
+author:
+ - FCO (@fotto)
+options:
+ broker_id:
+ description:
+ - The ID of the MQ broker to work on.
+ type: str
+ required: true
+ max_results:
+ description:
+ - The maximum number of results to return.
+ type: int
+ default: 100
+ skip_pending_create:
+ description:
+ - Will skip pending creates from the result set.
+ type: bool
+ default: false
+ skip_pending_delete:
+ description:
+ - Will skip pending deletes from the result set.
+ type: bool
+ default: false
+ as_dict:
+ description:
+ - Convert result into lookup table by username.
+ type: bool
+ default: false
+
+extends_documentation_fragment:
+ - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+"""
+
+
+EXAMPLES = r"""
+- name: get all users as list - relying on environment for API credentials
+ community.aws.mq_user_info:
+ broker_id: "aws-mq-broker-id"
+ max_results: 50
+ register: result
+
+- name: get users as dict
+ community.aws.mq_user_info:
+ broker_id: "aws-mq-broker-id"
+ as_dict: true
+ register: result
+
+- name: get list of users to decide which may need to be deleted
+ community.aws.mq_user_info:
+ broker_id: "aws-mq-broker-id"
+ skip_pending_delete: true
+
+- name: get list of users to decide which may need to be created
+ community.aws.mq_user_info:
+ broker_id: "aws-mq-broker-id"
+ skip_pending_create: true
+"""
+
+RETURN = r"""
+users:
+ type: dict
+ returned: success
+ description:
+ - Dict of user records keyed by username when I(as_dict=true), otherwise a list of records.
+ - Each entry is the user record as returned by the API, converted to snake case.
+"""
+
+try:
+ import botocore
+except ImportError:
+ # handled by AnsibleAWSModule
+ pass
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+
+DEFAULTS = {"max_results": 100, "skip_pending_create": False, "skip_pending_delete": False, "as_dict": True}
+
+
+def get_user_info(conn, module):
+ try:
+ response = conn.list_users(BrokerId=module.params["broker_id"], MaxResults=module.params["max_results"])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if module.check_mode:
+ # return an empty result set for an unknown broker in check mode
+ if module.params["as_dict"]:
+ return {}
+ return []
+ module.fail_json_aws(e, msg="Failed to describe users")
+ #
+ if not module.params["skip_pending_create"] and not module.params["skip_pending_delete"]:
+ # we can simply return the sub-object from the response
+ records = response["Users"]
+ else:
+ records = []
+ for record in response["Users"]:
+ if "PendingChange" in record:
+ if record["PendingChange"] == "CREATE" and module.params["skip_pending_create"]:
+ continue
+ if record["PendingChange"] == "DELETE" and module.params["skip_pending_delete"]:
+ continue
+ #
+ records.append(record)
+ #
+ if DEFAULTS["as_dict"]:
+ user_records = {}
+ for record in records:
+ user_records[record["Username"]] = record
+ #
+ return camel_dict_to_snake_dict(user_records, ignore_list=["Tags"])
+
+ return camel_dict_to_snake_dict(records, ignore_list=["Tags"])
+
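+# Shape of the result (illustrative, assumed usernames):
+#   as_dict=true  -> {"admin": {"username": "admin", "pending_change": "CREATE", ...}}
+#   as_dict=false -> [{"username": "admin", "pending_change": "CREATE", ...}, ...]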
+
+def main():
+ argument_spec = dict(
+ broker_id=dict(required=True, type="str"),
+ max_results=dict(required=False, type="int", default=DEFAULTS["max_results"]),
+ skip_pending_create=dict(required=False, type="bool", default=DEFAULTS["skip_pending_create"]),
+ skip_pending_delete=dict(required=False, type="bool", default=DEFAULTS["skip_pending_delete"]),
+ as_dict=dict(required=False, type="bool", default=DEFAULTS["as_dict"]),
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ connection = module.client("mq")
+
+ try:
+ user_records = get_user_info(connection, module)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(users=user_records)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/aws/plugins/modules/msk_cluster.py b/ansible_collections/community/aws/plugins/modules/msk_cluster.py
index 75c7fa829..aa0383294 100644
--- a/ansible_collections/community/aws/plugins/modules/msk_cluster.py
+++ b/ansible_collections/community/aws/plugins/modules/msk_cluster.py
@@ -1,12 +1,9 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
DOCUMENTATION = r"""
---
module: msk_cluster
@@ -207,16 +204,16 @@ options:
description: How many seconds to wait. Cluster creation can take up to 20-30 minutes.
type: int
default: 3600
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
- - amazon.aws.tags
notes:
- All operations are time consuming, for example create takes 20-30 minutes,
update kafka version -- more than one hour, update configuration -- 10-15 minutes;
- Cluster's brokers get evenly distributed over a number of availability zones
that's equal to the number of subnets.
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+ - amazon.aws.tags
"""
EXAMPLES = r"""
@@ -274,12 +271,12 @@ try:
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
- camel_dict_to_snake_dict,
- compare_aws_tags,
- AWSRetry,
-)
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
@AWSRetry.jittered_backoff(retries=5, delay=5)
@@ -304,7 +301,7 @@ def find_cluster_by_name(client, module, cluster_name):
module.fail_json_aws(e, "Failed to find kafka cluster by name")
if cluster_list:
if len(cluster_list) != 1:
- module.fail_json(msg="Found more than one cluster with name '{0}'".format(cluster_name))
+ module.fail_json(msg=f"Found more than one cluster with name '{cluster_name}'")
return cluster_list[0]
return {}
@@ -343,11 +340,7 @@ def wait_for_cluster_state(client, module, arn, state="ACTIVE"):
if current_state == state:
return
if time.time() - start > timeout:
- module.fail_json(
- msg="Timeout waiting for cluster {0} (desired state is '{1}')".format(
- current_state, state
- )
- )
+ module.fail_json(msg=f"Timeout waiting for cluster {current_state} (desired state is '{state}')")
time.sleep(check_interval)
@@ -367,7 +360,7 @@ def prepare_create_options(module):
"BrokerNodeGroupInfo": {
"ClientSubnets": module.params["subnets"],
"InstanceType": module.params["instance_type"],
- }
+ },
}
if module.params["security_groups"] and len(module.params["security_groups"]) != 0:
@@ -375,9 +368,7 @@ def prepare_create_options(module):
if module.params["ebs_volume_size"]:
c_params["BrokerNodeGroupInfo"]["StorageInfo"] = {
- "EbsStorageInfo": {
- "VolumeSize": module.params.get("ebs_volume_size")
- }
+ "EbsStorageInfo": {"VolumeSize": module.params.get("ebs_volume_size")}
}
if module.params["encryption"]:
@@ -388,7 +379,7 @@ def prepare_create_options(module):
}
c_params["EncryptionInfo"]["EncryptionInTransit"] = {
"ClientBroker": module.params["encryption"]["in_transit"].get("client_broker", "TLS"),
- "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True)
+ "InCluster": module.params["encryption"]["in_transit"].get("in_cluster", True),
}
if module.params["authentication"]:
@@ -428,12 +419,8 @@ def prepare_open_monitoring_options(module):
open_monitoring = module.params["open_monitoring"] or {}
m_params["OpenMonitoring"] = {
"Prometheus": {
- "JmxExporter": {
- "EnabledInBroker": open_monitoring.get("jmx_exporter", False)
- },
- "NodeExporter": {
- "EnabledInBroker": open_monitoring.get("node_exporter", False)
- }
+ "JmxExporter": {"EnabledInBroker": open_monitoring.get("jmx_exporter", False)},
+ "NodeExporter": {"EnabledInBroker": open_monitoring.get("node_exporter", False)},
}
}
return m_params
@@ -445,36 +432,26 @@ def prepare_logging_options(module):
if logging.get("cloudwatch"):
l_params["CloudWatchLogs"] = {
"Enabled": module.params["logging"]["cloudwatch"].get("enabled"),
- "LogGroup": module.params["logging"]["cloudwatch"].get("log_group")
+ "LogGroup": module.params["logging"]["cloudwatch"].get("log_group"),
}
else:
- l_params["CloudWatchLogs"] = {
- "Enabled": False
- }
+ l_params["CloudWatchLogs"] = {"Enabled": False}
if logging.get("firehose"):
l_params["Firehose"] = {
"Enabled": module.params["logging"]["firehose"].get("enabled"),
- "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream")
+ "DeliveryStream": module.params["logging"]["firehose"].get("delivery_stream"),
}
else:
- l_params["Firehose"] = {
- "Enabled": False
- }
+ l_params["Firehose"] = {"Enabled": False}
if logging.get("s3"):
l_params["S3"] = {
"Enabled": module.params["logging"]["s3"].get("enabled"),
"Bucket": module.params["logging"]["s3"].get("bucket"),
- "Prefix": module.params["logging"]["s3"].get("prefix")
+ "Prefix": module.params["logging"]["s3"].get("prefix"),
}
else:
- l_params["S3"] = {
- "Enabled": False
- }
- return {
- "LoggingInfo": {
- "BrokerLogs": l_params
- }
- }
+ l_params["S3"] = {"Enabled": False}
+ return {"LoggingInfo": {"BrokerLogs": l_params}}
def create_or_update_cluster(client, module):
@@ -488,7 +465,6 @@ def create_or_update_cluster(client, module):
cluster = find_cluster_by_name(client, module, module.params["name"])
if not cluster:
-
changed = True
if module.check_mode:
@@ -508,7 +484,6 @@ def create_or_update_cluster(client, module):
wait_for_cluster_state(client, module, arn=response["ClusterArn"], state="ACTIVE")
else:
-
response["ClusterArn"] = cluster["ClusterArn"]
response["changes"] = {}
@@ -517,9 +492,7 @@ def create_or_update_cluster(client, module):
"broker_count": {
"current_value": cluster["NumberOfBrokerNodes"],
"target_value": module.params.get("nodes"),
- "update_params": {
- "TargetNumberOfBrokerNodes": module.params.get("nodes")
- }
+ "update_params": {"TargetNumberOfBrokerNodes": module.params.get("nodes")},
},
"broker_storage": {
"current_value": cluster["BrokerNodeGroupInfo"]["StorageInfo"]["EbsStorageInfo"]["VolumeSize"],
@@ -528,14 +501,12 @@ def create_or_update_cluster(client, module):
"TargetBrokerEBSVolumeInfo": [
{"KafkaBrokerNodeId": "All", "VolumeSizeGB": module.params.get("ebs_volume_size")}
]
- }
+ },
},
"broker_type": {
"current_value": cluster["BrokerNodeGroupInfo"]["InstanceType"],
"target_value": module.params.get("instance_type"),
- "update_params": {
- "TargetInstanceType": module.params.get("instance_type")
- }
+ "update_params": {"TargetInstanceType": module.params.get("instance_type")},
},
"cluster_configuration": {
"current_value": {
@@ -549,51 +520,44 @@ def create_or_update_cluster(client, module):
"update_params": {
"ConfigurationInfo": {
"Arn": module.params.get("configuration_arn"),
- "Revision": module.params.get("configuration_revision")
+ "Revision": module.params.get("configuration_revision"),
}
- }
+ },
},
"cluster_kafka_version": {
"current_value": cluster["CurrentBrokerSoftwareInfo"]["KafkaVersion"],
"target_value": module.params.get("version"),
- "update_params": {
- "TargetKafkaVersion": module.params.get("version")
- }
+ "update_params": {"TargetKafkaVersion": module.params.get("version")},
},
"enhanced_monitoring": {
"current_value": cluster["EnhancedMonitoring"],
"target_value": module.params.get("enhanced_monitoring"),
"update_method": "update_monitoring",
- "update_params": prepare_enhanced_monitoring_options(module)
+ "update_params": prepare_enhanced_monitoring_options(module),
},
"open_monitoring": {
- "current_value": {
- "OpenMonitoring": cluster["OpenMonitoring"]
- },
+ "current_value": {"OpenMonitoring": cluster["OpenMonitoring"]},
"target_value": prepare_open_monitoring_options(module),
"update_method": "update_monitoring",
- "update_params": prepare_open_monitoring_options(module)
+ "update_params": prepare_open_monitoring_options(module),
},
"logging": {
- "current_value": {
- "LoggingInfo": cluster["LoggingInfo"]
- },
+ "current_value": {"LoggingInfo": cluster["LoggingInfo"]},
"target_value": prepare_logging_options(module),
"update_method": "update_monitoring",
- "update_params": prepare_logging_options(module)
- }
+ "update_params": prepare_logging_options(module),
+ },
}
for method, options in msk_cluster_changes.items():
-
- if 'botocore_version' in options:
+ if "botocore_version" in options:
if not module.botocore_at_least(options["botocore_version"]):
continue
try:
update_method = getattr(client, options.get("update_method", "update_" + method))
except AttributeError as e:
- module.fail_json_aws(e, "There is no update method 'update_{0}'".format(method))
+ module.fail_json_aws(e, f"There is no update method 'update_{method}'")
if options["current_value"] != options["target_value"]:
changed = True
@@ -609,23 +573,17 @@ def create_or_update_cluster(client, module):
wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE")
else:
module.fail_json(
- msg="Cluster can be updated only in active state, current state is '{0}'. check cluster state or use wait option".format(
- state
- )
+ msg=f"Cluster can be updated only in active state, current state is '{state}'. check cluster state or use wait option"
)
try:
response["changes"][method] = update_method(
- ClusterArn=cluster["ClusterArn"],
- CurrentVersion=version,
- **options["update_params"]
+ ClusterArn=cluster["ClusterArn"], CurrentVersion=version, **options["update_params"]
)
except (
botocore.exceptions.BotoCoreError,
botocore.exceptions.ClientError,
) as e:
- module.fail_json_aws(
- e, "Failed to update cluster via 'update_{0}'".format(method)
- )
+ module.fail_json_aws(e, f"Failed to update cluster via 'update_{method}'")
if module.params["wait"]:
wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE")
@@ -636,15 +594,15 @@ def create_or_update_cluster(client, module):
def update_cluster_tags(client, module, arn):
- new_tags = module.params.get('tags')
+ new_tags = module.params.get("tags")
if new_tags is None:
return False
- purge_tags = module.params.get('purge_tags')
+ purge_tags = module.params.get("purge_tags")
try:
- existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)['Tags']
+ existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)["Tags"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to retrieve tags for cluster '{0}'".format(arn))
+ module.fail_json_aws(e, msg=f"Unable to retrieve tags for cluster '{arn}'")
tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
@@ -655,14 +613,13 @@ def update_cluster_tags(client, module, arn):
if tags_to_add:
client.tag_resource(ResourceArn=arn, Tags=tags_to_add, aws_retry=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to set tags for cluster '{0}'".format(arn))
+ module.fail_json_aws(e, msg=f"Unable to set tags for cluster '{arn}'")
changed = bool(tags_to_add) or bool(tags_to_remove)
return changed
def delete_cluster(client, module):
-
cluster = find_cluster_by_name(client, module, module.params["name"])
if module.check_mode:
@@ -691,7 +648,6 @@ def delete_cluster(client, module):
def main():
-
module_args = dict(
name=dict(type="str", required=True),
state=dict(type="str", choices=["present", "absent"], default="present"),
@@ -720,10 +676,7 @@ def main():
type="dict",
options=dict(
in_cluster=dict(type="bool", default=True),
- client_broker=dict(
- choices=["TLS", "TLS_PLAINTEXT", "PLAINTEXT"],
- default="TLS"
- ),
+ client_broker=dict(choices=["TLS", "TLS_PLAINTEXT", "PLAINTEXT"], default="TLS"),
),
),
),
@@ -783,30 +736,28 @@ def main():
),
wait=dict(type="bool", default=False),
wait_timeout=dict(type="int", default=3600),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
)
module = AnsibleAWSModule(
argument_spec=module_args,
- required_if=[['state', 'present', ['version', 'configuration_arn', 'configuration_revision', 'subnets']]],
- supports_check_mode=True
+ required_if=[["state", "present", ["version", "configuration_arn", "configuration_revision", "subnets"]]],
+ supports_check_mode=True,
)
client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff())
if module.params["state"] == "present":
if len(module.params["subnets"]) < 2:
- module.fail_json(
- msg="At least two client subnets should be provided"
- )
+ module.fail_json(msg="At least two client subnets should be provided")
if int(module.params["nodes"]) % int(len(module.params["subnets"])) != 0:
module.fail_json(
msg="The number of broker nodes must be a multiple of availability zones in the subnets parameter"
)
if len(module.params["name"]) > 64:
module.fail_json(
- module.fail_json(msg='Cluster name "{0}" exceeds 64 character limit'.format(module.params["name"]))
+ module.fail_json(msg=f"Cluster name \"{module.params['name']}\" exceeds 64 character limit")
)
changed, response = create_or_update_cluster(client, module)
elif module.params["state"] == "absent":
@@ -816,9 +767,7 @@ def main():
bootstrap_broker_string = {}
if response.get("ClusterArn") and module.params["state"] == "present":
try:
- cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)[
- "ClusterInfo"
- ]
+ cluster_info = client.describe_cluster(ClusterArn=response["ClusterArn"], aws_retry=True)["ClusterInfo"]
if cluster_info.get("State") == "ACTIVE":
brokers = client.get_bootstrap_brokers(ClusterArn=response["ClusterArn"], aws_retry=True)
if brokers.get("BootstrapBrokerString"):
@@ -831,9 +780,7 @@ def main():
) as e:
module.fail_json_aws(
e,
- "Can not obtain information about cluster {0}".format(
- response["ClusterArn"]
- ),
+ f"Can not obtain information about cluster {response['ClusterArn']}",
)
module.exit_json(
diff --git a/ansible_collections/community/aws/plugins/modules/msk_config.py b/ansible_collections/community/aws/plugins/modules/msk_config.py
index 812eba16d..2469f9598 100644
--- a/ansible_collections/community/aws/plugins/modules/msk_config.py
+++ b/ansible_collections/community/aws/plugins/modules/msk_config.py
@@ -1,12 +1,9 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2021, Daniil Kupchenko (@oukooveu)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
DOCUMENTATION = r"""
---
module: msk_config
@@ -44,8 +41,8 @@ options:
type: list
elements: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
"""
@@ -99,18 +96,19 @@ try:
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
- camel_dict_to_snake_dict,
- AWSRetry,
-)
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def dict_to_prop(d):
"""convert dictionary to multi-line properties"""
if len(d) == 0:
return ""
- return "\n".join("{0}={1}".format(k, v) for k, v in d.items())
+ return "\n".join(f"{k}={v}" for k, v in d.items())
def prop_to_dict(p):
@@ -146,19 +144,13 @@ def find_active_config(client, module):
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="failed to obtain kafka configurations")
- active_configs = list(
- item
- for item in all_configs
- if item["Name"] == name and item["State"] == "ACTIVE"
- )
+ active_configs = list(item for item in all_configs if item["Name"] == name and item["State"] == "ACTIVE")
if active_configs:
if len(active_configs) == 1:
return active_configs[0]
else:
- module.fail_json_aws(
- msg="found more than one active config with name '{0}'".format(name)
- )
+ module.fail_json_aws(msg=f"found more than one active config with name '{name}'")
return None
@@ -195,7 +187,6 @@ def create_config(client, module):
# create new configuration
if not config:
-
if module.check_mode:
return True, {}
@@ -205,7 +196,7 @@ def create_config(client, module):
Description=module.params.get("description"),
KafkaVersions=module.params.get("kafka_versions"),
ServerProperties=dict_to_prop(module.params.get("config")).encode(),
- aws_retry=True
+ aws_retry=True,
)
except (
botocore.exceptions.BotoCoreError,
@@ -216,7 +207,9 @@ def create_config(client, module):
# update existing configuration (creates new revision)
else:
# it's required because 'config' doesn't contain 'ServerProperties'
- response = get_configuration_revision(client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"])
+ response = get_configuration_revision(
+ client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"]
+ )
if not is_configuration_changed(module, response):
return False, response
@@ -229,7 +222,7 @@ def create_config(client, module):
Arn=config["Arn"],
Description=module.params.get("description"),
ServerProperties=dict_to_prop(module.params.get("config")).encode(),
- aws_retry=True
+ aws_retry=True,
)
except (
botocore.exceptions.BotoCoreError,
@@ -270,7 +263,6 @@ def delete_config(client, module):
def main():
-
module_args = dict(
name=dict(type="str", required=True),
description=dict(type="str", default=""),
@@ -292,7 +284,8 @@ def main():
# return some useless staff in check mode if configuration doesn't exists
# can be useful when these options are referenced by other modules during check mode run
if module.check_mode and not response.get("Arn"):
- arn = "arn:aws:kafka:region:account:configuration/name/id"
+ account_id, partition = get_aws_account_info(module)
+ arn = f"arn:{partition}:kafka:{module.region}:{account_id}:configuration/{module.params['name']}/id"
revision = 1
server_properties = ""
else:
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall.py b/ansible_collections/community/aws/plugins/modules/networkfirewall.py
index 9bb6ebb75..f7fe63f33 100644
--- a/ansible_collections/community/aws/plugins/modules/networkfirewall.py
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: networkfirewall
short_description: manage AWS Network Firewall firewalls
version_added: 4.0.0
@@ -104,34 +102,34 @@ options:
author:
- Mark Chappell (@tremble)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Create an AWS Network Firewall
- community.aws.networkfirewall:
name: 'ExampleFirewall'
state: present
policy: 'ExamplePolicy'
subnets:
- - 'subnet-123456789abcdef01'
+ - 'subnet-123456789abcdef01'
# Create an AWS Network Firewall with various options, don't wait for creation
# to finish.
- community.aws.networkfirewall:
name: 'ExampleFirewall'
state: present
- delete_protection: True
+ delete_protection: true
description: "An example Description"
policy: 'ExamplePolicy'
- policy_change_protection: True
+ policy_change_protection: true
subnets:
- - 'subnet-123456789abcdef01'
- - 'subnet-abcdef0123456789a'
- subnet_change_protection: True
+ - 'subnet-123456789abcdef01'
+ - 'subnet-abcdef0123456789a'
+ subnet_change_protection: true
tags:
ExampleTag: Example Value
another_tag: another_example
@@ -142,9 +140,9 @@ EXAMPLES = '''
- community.aws.networkfirewall:
state: absent
name: 'ExampleFirewall'
-'''
+"""
-RETURN = '''
+RETURN = r"""
firewall:
description: The full details of the firewall
returned: success
@@ -269,37 +267,35 @@ firewall:
}
}
}
-'''
-
+"""
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager
def main():
-
argument_spec = dict(
- name=dict(type='str', required=False, aliases=['firewall_name']),
- arn=dict(type='str', required=False, aliases=['firewall_arn']),
- state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
- description=dict(type='str', required=False),
- tags=dict(type='dict', required=False, aliases=['resource_tags']),
- purge_tags=dict(type='bool', required=False, default=True),
- wait=dict(type='bool', required=False, default=True),
- wait_timeout=dict(type='int', required=False),
- subnet_change_protection=dict(type='bool', required=False),
- policy_change_protection=dict(type='bool', required=False, aliases=['firewall_policy_change_protection']),
- delete_protection=dict(type='bool', required=False),
- subnets=dict(type='list', elements='str', required=False),
- purge_subnets=dict(type='bool', required=False, default=True),
- policy=dict(type='str', required=False, aliases=['firewall_policy_arn']),
+ name=dict(type="str", required=False, aliases=["firewall_name"]),
+ arn=dict(type="str", required=False, aliases=["firewall_arn"]),
+ state=dict(type="str", required=False, default="present", choices=["present", "absent"]),
+ description=dict(type="str", required=False),
+ tags=dict(type="dict", required=False, aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", required=False, default=True),
+ wait=dict(type="bool", required=False, default=True),
+ wait_timeout=dict(type="int", required=False),
+ subnet_change_protection=dict(type="bool", required=False),
+ policy_change_protection=dict(type="bool", required=False, aliases=["firewall_policy_change_protection"]),
+ delete_protection=dict(type="bool", required=False),
+ subnets=dict(type="list", elements="str", required=False),
+ purge_subnets=dict(type="bool", required=False, default=True),
+ policy=dict(type="str", required=False, aliases=["firewall_policy_arn"]),
)
mutually_exclusive = [
- ('arn', 'name',)
+ ["arn", "name"],
]
required_one_of = [
- ('arn', 'name',)
+ ["arn", "name"],
]
module = AnsibleAWSModule(
@@ -309,30 +305,30 @@ def main():
required_one_of=required_one_of,
)
- arn = module.params.get('arn')
- name = module.params.get('name')
- state = module.params.get('state')
+ arn = module.params.get("arn")
+ name = module.params.get("name")
+ state = module.params.get("state")
manager = NetworkFirewallManager(module, name=name, arn=arn)
- manager.set_wait(module.params.get('wait', None))
- manager.set_wait_timeout(module.params.get('wait_timeout', None))
+ manager.set_wait(module.params.get("wait", None))
+ manager.set_wait_timeout(module.params.get("wait_timeout", None))
- if state == 'absent':
- manager.set_delete_protection(module.params.get('delete_protection', None))
+ if state == "absent":
+ manager.set_delete_protection(module.params.get("delete_protection", None))
manager.delete()
else:
if not manager.original_resource:
- if not module.params.get('subnets', None):
- module.fail_json('The subnets parameter must be provided on creation.')
- if not module.params.get('policy', None):
- module.fail_json('The policy parameter must be provided on creation.')
- manager.set_description(module.params.get('description', None))
- manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None))
- manager.set_subnet_change_protection(module.params.get('subnet_change_protection', None))
- manager.set_policy_change_protection(module.params.get('policy_change_protection', None))
- manager.set_delete_protection(module.params.get('delete_protection', None))
- manager.set_subnets(module.params.get('subnets', None), module.params.get('purge_subnets', None))
- manager.set_policy(module.params.get('policy', None))
+ if not module.params.get("subnets", None):
+ module.fail_json("The subnets parameter must be provided on creation.")
+ if not module.params.get("policy", None):
+ module.fail_json("The policy parameter must be provided on creation.")
+ manager.set_description(module.params.get("description", None))
+ manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None))
+ manager.set_subnet_change_protection(module.params.get("subnet_change_protection", None))
+ manager.set_policy_change_protection(module.params.get("policy_change_protection", None))
+ manager.set_delete_protection(module.params.get("delete_protection", None))
+ manager.set_subnets(module.params.get("subnets", None), module.params.get("purge_subnets", None))
+ manager.set_policy(module.params.get("policy", None))
manager.flush_changes()
results = dict(
@@ -344,9 +340,9 @@ def main():
before=manager.original_resource,
after=manager.updated_resource,
)
- results['diff'] = diff
+ results["diff"] = diff
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
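
A condensed sketch of the control flow the networkfirewall module delegates to NetworkFirewallManager, assuming only the manager interface visible in the hunks above (set_*, delete, flush_changes, original_resource); the helper name apply_firewall_state is illustrative:

    # Sketch only: parameter handling trimmed to the essentials.
    def apply_firewall_state(module, manager):
        if module.params.get("state") == "absent":
            # delete protection must be lifted before the firewall can go away
            manager.set_delete_protection(module.params.get("delete_protection"))
            manager.delete()
            return
        if not manager.original_resource:
            # subnets and policy are mandatory only when creating a new firewall
            for required in ("subnets", "policy"):
                if not module.params.get(required):
                    module.fail_json(msg=f"The {required} parameter must be provided on creation.")
        manager.set_description(module.params.get("description"))
        manager.set_tags(module.params.get("tags"), module.params.get("purge_tags"))
        manager.set_subnets(module.params.get("subnets"), module.params.get("purge_subnets"))
        manager.set_policy(module.params.get("policy"))
        manager.flush_changes()  # applies every queued change in one pass
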
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py
index 85df6b026..262a31067 100644
--- a/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: networkfirewall_info
short_description: describe AWS Network Firewall firewalls
version_added: 4.0.0
@@ -34,14 +32,15 @@ options:
elements: str
aliases: ['vpcs', 'vpc_id']
-author: Mark Chappell (@tremble)
+author:
+ - Mark Chappell (@tremble)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Describe all firewalls in an account
- community.aws.networkfirewall_info: {}
@@ -53,9 +52,9 @@ EXAMPLES = '''
# Describe a firewall by name
- community.aws.networkfirewall_info:
name: ExampleFirewall
-'''
+"""
-RETURN = '''
+RETURN = r"""
firewall_list:
description: A list of ARNs of the matching firewalls.
type: list
@@ -184,32 +183,30 @@ firewalls:
}
}
}
-'''
+"""
-
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallManager
def main():
-
argument_spec = dict(
- name=dict(type='str', required=False),
- arn=dict(type='str', required=False),
- vpc_ids=dict(type='list', required=False, elements='str', aliases=['vpcs', 'vpc_id']),
+ name=dict(type="str", required=False),
+ arn=dict(type="str", required=False),
+ vpc_ids=dict(type="list", required=False, elements="str", aliases=["vpcs", "vpc_id"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
- ('arn', 'name', 'vpc_ids',),
+ ["arn", "name", "vpc_ids"],
],
)
- arn = module.params.get('arn')
- name = module.params.get('name')
- vpcs = module.params.get('vpc_ids')
+ arn = module.params.get("arn")
+ name = module.params.get("name")
+ vpcs = module.params.get("vpc_ids")
manager = NetworkFirewallManager(module)
@@ -218,20 +215,20 @@ def main():
if name or arn:
firewall = manager.get_firewall(name=name, arn=arn)
if firewall:
- results['firewalls'] = [firewall]
+ results["firewalls"] = [firewall]
else:
- results['firewalls'] = []
+ results["firewalls"] = []
else:
if vpcs:
firewall_list = manager.list(vpc_ids=vpcs)
else:
firewall_list = manager.list()
- results['firewall_list'] = firewall_list
+ results["firewall_list"] = firewall_list
firewalls = [manager.get_firewall(arn=f) for f in firewall_list]
- results['firewalls'] = firewalls
+ results["firewalls"] = firewalls
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
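
The info module resolves queries in three modes: one firewall by name or ARN, firewalls filtered by VPC, or every firewall in the account. Condensed from the hunks above (describe_firewalls is an illustrative name):

    def describe_firewalls(manager, name=None, arn=None, vpc_ids=None):
        results = {}
        if name or arn:
            firewall = manager.get_firewall(name=name, arn=arn)
            results["firewalls"] = [firewall] if firewall else []
        else:
            # list() returns ARNs; each one is expanded to a full description
            arns = manager.list(vpc_ids=vpc_ids) if vpc_ids else manager.list()
            results["firewall_list"] = arns
            results["firewalls"] = [manager.get_firewall(arn=a) for a in arns]
        return results
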
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py
index 1026138a6..c742c9546 100644
--- a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: networkfirewall_policy
short_description: manage AWS Network Firewall policies
version_added: 4.0.0
@@ -78,7 +76,6 @@ options:
C(aws:alert_strict) and C(aws:alert_established).
      - Only valid for policies where I(stateful_rule_order='strict').
      - When creating a new policy, defaults to C(aws:drop_strict).
- - I(stateful_default_actions) requires botocore>=1.21.52.
required: false
type: list
elements: str
@@ -88,7 +85,6 @@ options:
      - When I(stateful_rule_order='strict'), rules and rule groups are evaluated in
the order that they're defined.
- Cannot be updated after creation.
- - I(stateful_rule_order) requires botocore>=1.21.52.
required: false
type: str
choices: ['default', 'strict']
@@ -139,17 +135,16 @@ options:
type: int
required: false
-
author:
- Mark Chappell (@tremble)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
- amazon.aws.tags
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Create an AWS Network Firewall Policy with default rule order
- community.aws.networkfirewall_policy:
stateful_rule_order: 'default'
@@ -178,9 +173,9 @@ EXAMPLES = '''
- community.aws.networkfirewall_policy:
state: absent
name: 'ExampleDropPolicy'
-'''
+"""
-RETURN = '''
+RETURN = r"""
policy:
description: The details of the policy
type: dict
@@ -336,48 +331,53 @@ policy:
type: dict
returned: success
example: {'tagName': 'Some Value'}
-'''
-
+"""
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager
def main():
-
custom_action_options = dict(
- name=dict(type='str', required=True),
+ name=dict(type="str", required=True),
# Poorly documented, but "publishMetricAction.dimensions ... must have length less than or equal to 1"
- publish_metric_dimension_value=dict(type='str', required=False, aliases=['publish_metric_dimension_values']),
+ publish_metric_dimension_value=dict(type="str", required=False, aliases=["publish_metric_dimension_values"]),
# NetworkFirewallPolicyManager can cope with a list for future-proofing
# publish_metric_dimension_values=dict(type='list', elements='str', required=False, aliases=['publish_metric_dimension_value']),
)
argument_spec = dict(
- name=dict(type='str', required=False),
- arn=dict(type='str', required=False),
- state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
- description=dict(type='str', required=False),
- tags=dict(type='dict', required=False, aliases=['resource_tags']),
- purge_tags=dict(type='bool', required=False, default=True),
- stateful_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateful_groups']),
- stateless_rule_groups=dict(type='list', elements='str', required=False, aliases=['stateless_groups']),
- stateful_default_actions=dict(type='list', elements='str', required=False),
- stateless_default_actions=dict(type='list', elements='str', required=False),
- stateless_fragment_default_actions=dict(type='list', elements='str', required=False),
- stateful_rule_order=dict(type='str', required=False, choices=['strict', 'default'], aliases=['rule_order']),
- stateless_custom_actions=dict(type='list', elements='dict', required=False,
- options=custom_action_options, aliases=['custom_stateless_actions']),
- purge_stateless_custom_actions=dict(type='bool', required=False, default=True, aliases=['purge_custom_stateless_actions']),
- wait=dict(type='bool', required=False, default=True),
- wait_timeout=dict(type='int', required=False),
+ name=dict(type="str", required=False),
+ arn=dict(type="str", required=False),
+ state=dict(type="str", required=False, default="present", choices=["present", "absent"]),
+ description=dict(type="str", required=False),
+ tags=dict(type="dict", required=False, aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", required=False, default=True),
+ stateful_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateful_groups"]),
+ stateless_rule_groups=dict(type="list", elements="str", required=False, aliases=["stateless_groups"]),
+ stateful_default_actions=dict(type="list", elements="str", required=False),
+ stateless_default_actions=dict(type="list", elements="str", required=False),
+ stateless_fragment_default_actions=dict(type="list", elements="str", required=False),
+ stateful_rule_order=dict(type="str", required=False, choices=["strict", "default"], aliases=["rule_order"]),
+ stateless_custom_actions=dict(
+ type="list",
+ elements="dict",
+ required=False,
+ options=custom_action_options,
+ aliases=["custom_stateless_actions"],
+ ),
+ purge_stateless_custom_actions=dict(
+ type="bool", required=False, default=True, aliases=["purge_custom_stateless_actions"]
+ ),
+ wait=dict(type="bool", required=False, default=True),
+ wait_timeout=dict(type="int", required=False),
)
mutually_exclusive = [
- ('arn', 'name',)
+ ["arn", "name"],
]
required_one_of = [
- ('arn', 'name',)
+ ["arn", "name"],
]
module = AnsibleAWSModule(
@@ -387,36 +387,32 @@ def main():
required_one_of=required_one_of,
)
- arn = module.params.get('arn')
- name = module.params.get('name')
- state = module.params.get('state')
+ arn = module.params.get("arn")
+ name = module.params.get("name")
+ state = module.params.get("state")
manager = NetworkFirewallPolicyManager(module, name=name, arn=arn)
- manager.set_wait(module.params.get('wait', None))
- manager.set_wait_timeout(module.params.get('wait_timeout', None))
+ manager.set_wait(module.params.get("wait", None))
+ manager.set_wait_timeout(module.params.get("wait_timeout", None))
- rule_order = module.params.get('stateful_rule_order')
- if rule_order and rule_order != "default":
- module.require_botocore_at_least('1.21.52', reason='to set the rule order')
- if module.params.get('stateful_default_actions'):
- module.require_botocore_at_least(
- '1.21.52', reason='to set the default actions for stateful flows')
+ rule_order = module.params.get("stateful_rule_order")
- if state == 'absent':
+ if state == "absent":
manager.delete()
else:
- manager.set_description(module.params.get('description', None))
- manager.set_tags(module.params.get('tags', None), module.params.get('purge_tags', None))
+ manager.set_description(module.params.get("description", None))
+ manager.set_tags(module.params.get("tags", None), module.params.get("purge_tags", None))
# Actions need to be defined before potentially consuming them
manager.set_custom_stateless_actions(
- module.params.get('stateless_custom_actions', None),
- module.params.get('purge_stateless_custom_actions', True)),
- manager.set_stateful_rule_order(module.params.get('stateful_rule_order', None))
- manager.set_stateful_rule_groups(module.params.get('stateful_rule_groups', None))
- manager.set_stateless_rule_groups(module.params.get('stateless_rule_groups', None))
- manager.set_stateful_default_actions(module.params.get('stateful_default_actions', None))
- manager.set_stateless_default_actions(module.params.get('stateless_default_actions', None))
- manager.set_stateless_fragment_default_actions(module.params.get('stateless_fragment_default_actions', None))
+ module.params.get("stateless_custom_actions", None),
+ module.params.get("purge_stateless_custom_actions", True),
+            )
+ manager.set_stateful_rule_order(module.params.get("stateful_rule_order", None))
+ manager.set_stateful_rule_groups(module.params.get("stateful_rule_groups", None))
+ manager.set_stateless_rule_groups(module.params.get("stateless_rule_groups", None))
+ manager.set_stateful_default_actions(module.params.get("stateful_default_actions", None))
+ manager.set_stateless_default_actions(module.params.get("stateless_default_actions", None))
+ manager.set_stateless_fragment_default_actions(module.params.get("stateless_fragment_default_actions", None))
manager.flush_changes()
@@ -429,9 +425,9 @@ def main():
before=manager.original_resource,
after=manager.updated_resource,
)
- results['diff'] = diff
+ results["diff"] = diff
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
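
A recurring theme in this commit is the removal of per-feature botocore gating: guards such as the one below (reconstructed from the lines removed above) are dropped because the collection's baseline botocore requirement now covers these APIs.

    # Pattern removed by this commit: runtime checks before using newer APIs.
    rule_order = module.params.get("stateful_rule_order")
    if rule_order and rule_order != "default":
        module.require_botocore_at_least("1.21.52", reason="to set the rule order")
    if module.params.get("stateful_default_actions"):
        module.require_botocore_at_least("1.21.52", reason="to set the default actions for stateful flows")
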
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py
index 1f170f5b3..3bb921745 100644
--- a/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_policy_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: networkfirewall_policy_info
short_description: describe AWS Network Firewall policies
version_added: 4.0.0
@@ -26,14 +24,15 @@ options:
required: false
type: str
-author: Mark Chappell (@tremble)
+author:
+ - Mark Chappell (@tremble)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Describe all Firewall policies in an account
- community.aws.networkfirewall_policy_info: {}
@@ -45,9 +44,9 @@ EXAMPLES = '''
# Describe a Firewall policy by name
- community.aws.networkfirewall_policy_info:
name: ExamplePolicy
-'''
+"""
-RETURN = '''
+RETURN = r"""
policy_list:
description: A list of ARNs of the matching policies.
type: list
@@ -212,30 +211,28 @@ policies:
type: dict
returned: success
example: {'tagName': 'Some Value'}
-'''
+"""
-
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallPolicyManager
def main():
-
argument_spec = dict(
- name=dict(type='str', required=False),
- arn=dict(type='str', required=False),
+ name=dict(type="str", required=False),
+ arn=dict(type="str", required=False),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
- ('arn', 'name',),
+ ["arn", "name"],
],
)
- arn = module.params.get('arn')
- name = module.params.get('name')
+ arn = module.params.get("arn")
+ name = module.params.get("name")
manager = NetworkFirewallPolicyManager(module)
@@ -244,17 +241,17 @@ def main():
if name or arn:
policy = manager.get_policy(name=name, arn=arn)
if policy:
- results['policies'] = [policy]
+ results["policies"] = [policy]
else:
- results['policies'] = []
+ results["policies"] = []
else:
policy_list = manager.list()
- results['policy_list'] = policy_list
+ results["policy_list"] = policy_list
policies = [manager.get_policy(arn=p) for p in policy_list]
- results['policies'] = policies
+ results["policies"] = policies
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
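
The same mechanical rewrite runs through all five modules: mutually_exclusive, required_one_of and required_together entries become lists instead of tuples. AnsibleModule accepts either; the list form is simply the style this commit standardizes on. A minimal sketch with the arn/name pair used throughout:

    from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule

    argument_spec = dict(
        name=dict(type="str", required=False),
        arn=dict(type="str", required=False),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[["arn", "name"]],  # old spelling: [('arn', 'name',)]
        required_one_of=[["arn", "name"]],
    )
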
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py
index c8e2ea38b..9300036c5 100644
--- a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: networkfirewall_rule_group
short_description: create, delete and modify AWS Network Firewall rule groups
version_added: 4.0.0
@@ -60,7 +58,6 @@ options:
- Mutually exclusive with I(rule_type=stateless).
      - For more information on how rules are evaluated, read the AWS documentation
U(https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html).
- - I(rule_order) requires botocore>=1.23.23.
type: str
required: false
choices: ['default', 'strict']
@@ -263,17 +260,16 @@ options:
type: int
required: false
-
author:
- Mark Chappell (@tremble)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Create a rule group
- name: Create a minimal AWS Network Firewall Rule Group
community.aws.networkfirewall_rule_group:
@@ -369,8 +365,8 @@ EXAMPLES = '''
domain_names:
- 'example.com'
- '.example.net'
- filter_https: True
- filter_http: True
+ filter_https: true
+ filter_http: true
action: allow
source_ips: '192.0.2.0/24'
@@ -396,10 +392,9 @@ EXAMPLES = '''
name: 'MinimalGroup'
type: 'stateful'
state: absent
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
rule_group:
description: Details of the rules in the rule group
type: dict
@@ -708,109 +703,104 @@ rule_group:
type: str
returned: success
example: 'STATEFUL'
-'''
-
+"""
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager
def main():
-
domain_list_spec = dict(
- domain_names=dict(type='list', elements='str', required=True),
- filter_http=dict(type='bool', required=False, default=False),
- filter_https=dict(type='bool', required=False, default=False),
- action=dict(type='str', required=True, choices=['allow', 'deny']),
- source_ips=dict(type='list', elements='str', required=False),
+ domain_names=dict(type="list", elements="str", required=True),
+ filter_http=dict(type="bool", required=False, default=False),
+ filter_https=dict(type="bool", required=False, default=False),
+ action=dict(type="str", required=True, choices=["allow", "deny"]),
+ source_ips=dict(type="list", elements="str", required=False),
)
rule_list_spec = dict(
- action=dict(type='str', required=True, choices=['pass', 'drop', 'alert']),
- protocol=dict(type='str', required=True),
- source=dict(type='str', required=True),
- source_port=dict(type='str', required=True),
- direction=dict(type='str', required=False, default='forward', choices=['forward', 'any']),
- destination=dict(type='str', required=True),
- destination_port=dict(type='str', required=True),
- sid=dict(type='int', required=True),
- rule_options=dict(type='dict', required=False),
+ action=dict(type="str", required=True, choices=["pass", "drop", "alert"]),
+ protocol=dict(type="str", required=True),
+ source=dict(type="str", required=True),
+ source_port=dict(type="str", required=True),
+ direction=dict(type="str", required=False, default="forward", choices=["forward", "any"]),
+ destination=dict(type="str", required=True),
+ destination_port=dict(type="str", required=True),
+ sid=dict(type="int", required=True),
+ rule_options=dict(type="dict", required=False),
)
argument_spec = dict(
- arn=dict(type='str', required=False),
- name=dict(type='str', required=False),
- rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateful']),
+ arn=dict(type="str", required=False),
+ name=dict(type="str", required=False),
+ rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateful"]),
# rule_type=dict(type='str', required=True, aliases=['type'], choices=['stateless', 'stateful']),
- state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
- capacity=dict(type='int', required=False),
- rule_order=dict(type='str', required=False, aliases=['stateful_rule_order'], choices=['default', 'strict']),
- description=dict(type='str', required=False),
- ip_variables=dict(type='dict', required=False, aliases=['ip_set_variables']),
- purge_ip_variables=dict(type='bool', required=False, aliases=['purge_ip_set_variables'], default=True),
- port_variables=dict(type='dict', required=False, aliases=['port_set_variables']),
- purge_port_variables=dict(type='bool', required=False, aliases=['purge_port_set_variables'], default=True),
- rule_strings=dict(type='list', elements='str', required=False),
- domain_list=dict(type='dict', options=domain_list_spec, required=False),
- rule_list=dict(type='list', elements='dict', aliases=['stateful_rule_list'], options=rule_list_spec, required=False),
- tags=dict(type='dict', required=False, aliases=['resource_tags']),
- purge_tags=dict(type='bool', required=False, default=True),
- wait=dict(type='bool', required=False, default=True),
- wait_timeout=dict(type='int', required=False),
+ state=dict(type="str", required=False, choices=["present", "absent"], default="present"),
+ capacity=dict(type="int", required=False),
+ rule_order=dict(type="str", required=False, aliases=["stateful_rule_order"], choices=["default", "strict"]),
+ description=dict(type="str", required=False),
+ ip_variables=dict(type="dict", required=False, aliases=["ip_set_variables"]),
+ purge_ip_variables=dict(type="bool", required=False, aliases=["purge_ip_set_variables"], default=True),
+ port_variables=dict(type="dict", required=False, aliases=["port_set_variables"]),
+ purge_port_variables=dict(type="bool", required=False, aliases=["purge_port_set_variables"], default=True),
+ rule_strings=dict(type="list", elements="str", required=False),
+ domain_list=dict(type="dict", options=domain_list_spec, required=False),
+ rule_list=dict(
+ type="list", elements="dict", aliases=["stateful_rule_list"], options=rule_list_spec, required=False
+ ),
+ tags=dict(type="dict", required=False, aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", required=False, default=True),
+ wait=dict(type="bool", required=False, default=True),
+ wait_timeout=dict(type="int", required=False),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
- ('name', 'arn'),
- ('rule_strings', 'domain_list', 'rule_list'),
- ('domain_list', 'ip_variables'),
+ ["name", "arn"],
+ ["rule_strings", "domain_list", "rule_list"],
+ ["domain_list", "ip_variables"],
],
required_together=[
- ('name', 'rule_type'),
+ ["name", "rule_type"],
],
required_one_of=[
- ('name', 'arn'),
+ ["name", "arn"],
],
)
- module.require_botocore_at_least('1.19.20')
-
- state = module.params.get('state')
- name = module.params.get('name')
- arn = module.params.get('arn')
- rule_type = module.params.get('rule_type')
-
- if rule_type == 'stateless':
- if module.params.get('rule_order'):
- module.fail_json('rule_order can not be set for stateless rule groups')
- if module.params.get('rule_strings'):
- module.fail_json('rule_strings can only be used for stateful rule groups')
- if module.params.get('rule_list'):
- module.fail_json('rule_list can only be used for stateful rule groups')
- if module.params.get('domain_list'):
- module.fail_json('domain_list can only be used for stateful rule groups')
-
- if module.params.get('rule_order'):
- module.require_botocore_at_least('1.23.23', reason='to set the rule order')
+ state = module.params.get("state")
+ name = module.params.get("name")
+ arn = module.params.get("arn")
+ rule_type = module.params.get("rule_type")
+
+ if rule_type == "stateless":
+ if module.params.get("rule_order"):
+ module.fail_json("rule_order can not be set for stateless rule groups")
+ if module.params.get("rule_strings"):
+ module.fail_json("rule_strings can only be used for stateful rule groups")
+ if module.params.get("rule_list"):
+ module.fail_json("rule_list can only be used for stateful rule groups")
+ if module.params.get("domain_list"):
+ module.fail_json("domain_list can only be used for stateful rule groups")
manager = NetworkFirewallRuleManager(module, arn=arn, name=name, rule_type=rule_type)
- manager.set_wait(module.params.get('wait', None))
- manager.set_wait_timeout(module.params.get('wait_timeout', None))
+ manager.set_wait(module.params.get("wait", None))
+ manager.set_wait_timeout(module.params.get("wait_timeout", None))
- if state == 'absent':
+ if state == "absent":
manager.delete()
else:
- manager.set_description(module.params.get('description'))
- manager.set_capacity(module.params.get('capacity'))
- manager.set_rule_order(module.params.get('rule_order'))
- manager.set_ip_variables(module.params.get('ip_variables'), module.params.get('purge_ip_variables'))
- manager.set_port_variables(module.params.get('port_variables'), module.params.get('purge_port_variables'))
- manager.set_rule_string(module.params.get('rule_strings'))
- manager.set_domain_list(module.params.get('domain_list'))
- manager.set_rule_list(module.params.get('rule_list'))
- manager.set_tags(module.params.get('tags'), module.params.get('purge_tags'))
+ manager.set_description(module.params.get("description"))
+ manager.set_capacity(module.params.get("capacity"))
+ manager.set_rule_order(module.params.get("rule_order"))
+ manager.set_ip_variables(module.params.get("ip_variables"), module.params.get("purge_ip_variables"))
+ manager.set_port_variables(module.params.get("port_variables"), module.params.get("purge_port_variables"))
+ manager.set_rule_string(module.params.get("rule_strings"))
+ manager.set_domain_list(module.params.get("domain_list"))
+ manager.set_rule_list(module.params.get("rule_list"))
+ manager.set_tags(module.params.get("tags"), module.params.get("purge_tags"))
manager.flush_changes()
@@ -823,9 +813,9 @@ def main():
before=manager.original_resource,
after=manager.updated_resource,
)
- results['diff'] = diff
+ results["diff"] = diff
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
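
networkfirewall_rule_group leans on nested suboption validation: domain_list and rule_list each carry an options= sub-spec, so Ansible type-checks nested keys before main() ever sees them. A minimal sketch of that mechanism using the domain_list spec from above:

    from ansible.module_utils.basic import AnsibleModule

    domain_list_spec = dict(
        domain_names=dict(type="list", elements="str", required=True),
        action=dict(type="str", required=True, choices=["allow", "deny"]),
    )
    module = AnsibleModule(
        argument_spec=dict(
            domain_list=dict(type="dict", options=domain_list_spec, required=False),
        ),
        supports_check_mode=True,
    )
    # module.params["domain_list"], when supplied, arrives with every key
    # already validated against domain_list_spec.
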
diff --git a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py
index a9cec3778..8b3c9d230 100644
--- a/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py
+++ b/ansible_collections/community/aws/plugins/modules/networkfirewall_rule_group_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: networkfirewall_rule_group_info
short_description: describe AWS Network Firewall rule groups
version_added: 4.0.0
@@ -38,19 +36,19 @@ options:
      - When I(scope='account'), returns a description of all rule groups in the account.
      - When I(scope='managed'), returns a list of available managed rule group ARNs.
      - By default, searches only at the account scope.
- - I(scope='managed') requires botocore>=1.23.23.
required: false
choices: ['managed', 'account']
type: str
-author: Mark Chappell (@tremble)
+author:
+ - Mark Chappell (@tremble)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Describe all Rule Groups in an account (excludes managed groups)
- community.aws.networkfirewall_rule_group_info: {}
@@ -68,10 +66,9 @@ EXAMPLES = '''
- community.aws.networkfirewall_rule_group_info:
name: ExampleRuleGroup
type: stateful
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
rule_list:
description: A list of ARNs of the matching rule groups.
type: list
@@ -387,43 +384,36 @@ rule_groups:
type: str
returned: success
example: 'STATEFUL'
-'''
-
+"""
-from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.networkfirewall import NetworkFirewallRuleManager
def main():
-
argument_spec = dict(
- name=dict(type='str', required=False),
- rule_type=dict(type='str', required=False, aliases=['type'], choices=['stateless', 'stateful']),
- arn=dict(type='str', required=False),
- scope=dict(type='str', required=False, choices=['managed', 'account']),
+ name=dict(type="str", required=False),
+ rule_type=dict(type="str", required=False, aliases=["type"], choices=["stateless", "stateful"]),
+ arn=dict(type="str", required=False),
+ scope=dict(type="str", required=False, choices=["managed", "account"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
- ('arn', 'name',),
- ('arn', 'rule_type'),
+ ["arn", "name"],
+ ["arn", "rule_type"],
],
required_together=[
- ('name', 'rule_type'),
- ]
+ ["name", "rule_type"],
+ ],
)
- module.require_botocore_at_least('1.19.20')
-
- arn = module.params.get('arn')
- name = module.params.get('name')
- rule_type = module.params.get('rule_type')
- scope = module.params.get('scope')
-
- if module.params.get('scope') == 'managed':
- module.require_botocore_at_least('1.23.23', reason='to list managed rules')
+ arn = module.params.get("arn")
+ name = module.params.get("name")
+ rule_type = module.params.get("rule_type")
+ scope = module.params.get("scope")
manager = NetworkFirewallRuleManager(module, name=name, rule_type=rule_type)
@@ -432,18 +422,18 @@ def main():
if name or arn:
rule = manager.get_rule_group(name=name, rule_type=rule_type, arn=arn)
if rule:
- results['rule_groups'] = [rule]
+ results["rule_groups"] = [rule]
else:
- results['rule_groups'] = []
+ results["rule_groups"] = []
else:
rule_list = manager.list(scope=scope)
- results['rule_list'] = rule_list
- if scope != 'managed':
+ results["rule_list"] = rule_list
+ if scope != "managed":
rules = [manager.get_rule_group(arn=r) for r in rule_list]
- results['rule_groups'] = rules
+ results["rule_groups"] = rules
module.exit_json(**results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
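
One behavioural detail worth noting in the listing branch above: managed rule groups are listed but never individually described, so rule_groups is only populated for account-scope queries.

    # Condensed from the hunk above.
    rule_list = manager.list(scope=scope)
    results["rule_list"] = rule_list
    if scope != "managed":
        # expanding managed groups isn't supported; only account-owned
        # groups get full descriptions
        results["rule_groups"] = [manager.get_rule_group(arn=r) for r in rule_list]
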
diff --git a/ansible_collections/community/aws/plugins/modules/opensearch.py b/ansible_collections/community/aws/plugins/modules/opensearch.py
index 7ed8c0722..d89e173bb 100644
--- a/ansible_collections/community/aws/plugins/modules/opensearch.py
+++ b/ansible_collections/community/aws/plugins/modules/opensearch.py
@@ -1,20 +1,18 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = """
+DOCUMENTATION = r"""
---
module: opensearch
short_description: Creates OpenSearch or Elasticsearch domain
description:
  - Creates or modifies an Amazon OpenSearch Service domain.
version_added: 4.0.0
-author: "Sebastien Rosset (@sebastien-rosset)"
+author:
+ - "Sebastien Rosset (@sebastien-rosset)"
options:
state:
description:
@@ -387,16 +385,16 @@ options:
      - How long before wait gives up, in seconds.
default: 300
type: int
-requirements:
- - botocore >= 1.21.38
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
+ - amazon.aws.boto3
"""
-EXAMPLES = """
+RETURN = r""" # """
+
+EXAMPLES = r"""
- name: Create OpenSearch domain for dev environment, no zone awareness, no dedicated masters
community.aws.opensearch:
@@ -452,16 +450,16 @@ EXAMPLES = """
auto_tune_options:
enabled: true
maintenance_schedules:
- - start_at: "2025-01-12"
- duration:
- value: 1
- unit: "HOURS"
- cron_expression_for_recurrence: "cron(0 12 * * ? *)"
- - start_at: "2032-01-12"
- duration:
- value: 2
- unit: "HOURS"
- cron_expression_for_recurrence: "cron(0 12 * * ? *)"
+ - start_at: "2025-01-12"
+ duration:
+ value: 1
+ unit: "HOURS"
+ cron_expression_for_recurrence: "cron(0 12 * * ? *)"
+ - start_at: "2032-01-12"
+ duration:
+ value: 2
+ unit: "HOURS"
+ cron_expression_for_recurrence: "cron(0 12 * * ? *)"
tags:
Environment: Development
Application: Search
@@ -480,12 +478,11 @@ EXAMPLES = """
cluster_config:
instance_count: 40
wait: true
-
"""
-from copy import deepcopy
import datetime
import json
+from copy import deepcopy
try:
import botocore
@@ -494,26 +491,20 @@ except ImportError:
from ansible.module_utils.six import string_types
-# import module snippets
-from ansible_collections.amazon.aws.plugins.module_utils.core import (
- AnsibleAWSModule,
- is_boto3_error_code,
-)
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
- AWSRetry,
- boto3_tag_list_to_ansible_dict,
- compare_policies,
-)
-from ansible_collections.community.aws.plugins.module_utils.opensearch import (
- compare_domain_versions,
- ensure_tags,
- get_domain_status,
- get_domain_config,
- get_target_increment_version,
- normalize_opensearch,
- parse_version,
- wait_for_domain_status,
-)
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.opensearch import compare_domain_versions
+from ansible_collections.community.aws.plugins.module_utils.opensearch import ensure_tags
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_config
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_status
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_target_increment_version
+from ansible_collections.community.aws.plugins.module_utils.opensearch import normalize_opensearch
+from ansible_collections.community.aws.plugins.module_utils.opensearch import parse_version
+from ansible_collections.community.aws.plugins.module_utils.opensearch import wait_for_domain_status
def ensure_domain_absent(client, module):
@@ -522,16 +513,17 @@ def ensure_domain_absent(client, module):
domain = get_domain_status(client, module, domain_name)
if module.check_mode:
- module.exit_json(
- changed=True, msg="Would have deleted domain if not in check mode"
- )
+ module.exit_json(changed=True, msg="Would have deleted domain if not in check mode")
try:
client.delete_domain(DomainName=domain_name)
changed = True
except is_boto3_error_code("ResourceNotFoundException"):
# The resource does not exist, or it has already been deleted
return dict(changed=False)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="trying to delete domain")
# If we're not waiting for a delete to complete then we're all done
@@ -543,7 +535,10 @@ def ensure_domain_absent(client, module):
return dict(changed=changed)
except is_boto3_error_code("ResourceNotFoundException"):
return dict(changed=changed)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, "awaiting domain deletion")
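
ensure_domain_absent keeps the standard check-mode-safe delete shape: report changed=True and exit under check mode, treat ResourceNotFoundException as "already absent", and fail cleanly on anything else. Condensed (imports as in the module above; delete_domain is an illustrative name):

    import botocore
    from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code

    def delete_domain(client, module, domain_name):
        if module.check_mode:
            module.exit_json(changed=True, msg="Would have deleted domain if not in check mode")
        try:
            client.delete_domain(DomainName=domain_name)
        except is_boto3_error_code("ResourceNotFoundException"):
            return dict(changed=False)  # already gone: idempotent no-op
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="trying to delete domain")
        return dict(changed=True)
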
@@ -568,8 +563,9 @@ def upgrade_domain(client, module, source_version, target_engine_version):
# It's not possible to upgrade directly to the target version.
# Check the module parameters to determine if this is allowed or not.
if not module.params.get("allow_intermediate_upgrades"):
- module.fail_json(msg="Cannot upgrade from {0} to version {1}. The highest compatible version is {2}".format(
- source_version, target_engine_version, next_version))
+ module.fail_json(
+ msg=f"Cannot upgrade from {source_version} to version {target_engine_version}. The highest compatible version is {next_version}"
+ )
parameters = {
"DomainName": domain_name,
@@ -592,17 +588,13 @@ def upgrade_domain(client, module, source_version, target_engine_version):
# raised if it's not possible to upgrade to the target version.
module.fail_json_aws(
e,
- msg="Couldn't upgrade domain {0} from {1} to {2}".format(
- domain_name, current_version, next_version
- ),
+ msg=f"Couldn't upgrade domain {domain_name} from {current_version} to {next_version}",
)
if module.check_mode:
module.exit_json(
changed=True,
- msg="Would have upgraded domain from {0} to {1} if not in check mode".format(
- current_version, next_version
- ),
+ msg=f"Would have upgraded domain from {current_version} to {next_version} if not in check mode",
)
current_version = next_version
@@ -610,9 +602,7 @@ def upgrade_domain(client, module, source_version, target_engine_version):
wait_for_domain_status(client, module, domain_name, "domain_available")
-def set_cluster_config(
- module, current_domain_config, desired_domain_config, change_set
-):
+def set_cluster_config(module, current_domain_config, desired_domain_config, change_set):
changed = False
cluster_config = desired_domain_config["ClusterConfig"]
@@ -627,24 +617,16 @@ def set_cluster_config(
if cluster_config["ZoneAwarenessEnabled"]:
if cluster_opts.get("availability_zone_count") is not None:
cluster_config["ZoneAwarenessConfig"] = {
- "AvailabilityZoneCount": cluster_opts.get(
- "availability_zone_count"
- ),
+ "AvailabilityZoneCount": cluster_opts.get("availability_zone_count"),
}
if cluster_opts.get("dedicated_master") is not None:
- cluster_config["DedicatedMasterEnabled"] = cluster_opts.get(
- "dedicated_master"
- )
+ cluster_config["DedicatedMasterEnabled"] = cluster_opts.get("dedicated_master")
if cluster_config["DedicatedMasterEnabled"]:
if cluster_opts.get("dedicated_master_instance_type") is not None:
- cluster_config["DedicatedMasterType"] = cluster_opts.get(
- "dedicated_master_instance_type"
- )
+ cluster_config["DedicatedMasterType"] = cluster_opts.get("dedicated_master_instance_type")
if cluster_opts.get("dedicated_master_instance_count") is not None:
- cluster_config["DedicatedMasterCount"] = cluster_opts.get(
- "dedicated_master_instance_count"
- )
+ cluster_config["DedicatedMasterCount"] = cluster_opts.get("dedicated_master_instance_count")
if cluster_opts.get("warm_enabled") is not None:
cluster_config["WarmEnabled"] = cluster_opts.get("warm_enabled")
@@ -665,32 +647,19 @@ def set_cluster_config(
if cold_storage_opts is not None and cold_storage_opts.get("enabled"):
module.fail_json(msg="Cold Storage is not supported")
cluster_config.pop("ColdStorageOptions", None)
- if (
- current_domain_config is not None
- and "ClusterConfig" in current_domain_config
- ):
+ if current_domain_config is not None and "ClusterConfig" in current_domain_config:
# Remove 'ColdStorageOptions' from the current domain config, otherwise the actual vs desired diff
# will indicate a change must be done.
current_domain_config["ClusterConfig"].pop("ColdStorageOptions", None)
else:
# Elasticsearch 7.9 and above support ColdStorageOptions.
- if (
- cold_storage_opts is not None
- and cold_storage_opts.get("enabled") is not None
- ):
+ if cold_storage_opts is not None and cold_storage_opts.get("enabled") is not None:
cluster_config["ColdStorageOptions"] = {
"Enabled": cold_storage_opts.get("enabled"),
}
- if (
- current_domain_config is not None
- and current_domain_config["ClusterConfig"] != cluster_config
- ):
- change_set.append(
- "ClusterConfig changed from {0} to {1}".format(
- current_domain_config["ClusterConfig"], cluster_config
- )
- )
+ if current_domain_config is not None and current_domain_config["ClusterConfig"] != cluster_config:
+ change_set.append(f"ClusterConfig changed from {current_domain_config['ClusterConfig']} to {cluster_config}")
changed = True
return changed
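
All of the set_*_options helpers reformatted in the following hunks share one contract: copy user options into the desired config, diff it against the current config, append a human-readable entry to change_set (surfaced in check mode), and return whether anything changed. The skeleton, abstracted from these hunks ("SectionKey" and "section_options" are placeholders):

    def set_section_options(module, current_domain_config, desired_domain_config, change_set):
        changed = False
        section = desired_domain_config["SectionKey"]
        opts = module.params.get("section_options")
        if opts is None:
            return changed
        # ... copy each supplied option into `section` here ...
        if current_domain_config is not None and current_domain_config["SectionKey"] != section:
            change_set.append(f"SectionKey changed from {current_domain_config['SectionKey']} to {section}")
            changed = True
        return changed
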
@@ -716,22 +685,13 @@ def set_ebs_options(module, current_domain_config, desired_domain_config, change
if ebs_opts.get("iops") is not None:
ebs_config["Iops"] = ebs_opts.get("iops")
- if (
- current_domain_config is not None
- and current_domain_config["EBSOptions"] != ebs_config
- ):
- change_set.append(
- "EBSOptions changed from {0} to {1}".format(
- current_domain_config["EBSOptions"], ebs_config
- )
- )
+ if current_domain_config is not None and current_domain_config["EBSOptions"] != ebs_config:
+ change_set.append(f"EBSOptions changed from {current_domain_config['EBSOptions']} to {ebs_config}")
changed = True
return changed
-def set_encryption_at_rest_options(
- module, current_domain_config, desired_domain_config, change_set
-):
+def set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set):
changed = False
encryption_at_rest_config = desired_domain_config["EncryptionAtRestOptions"]
encryption_at_rest_opts = module.params.get("encryption_at_rest_options")
@@ -745,50 +705,36 @@ def set_encryption_at_rest_options(
}
else:
if encryption_at_rest_opts.get("kms_key_id") is not None:
- encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get(
- "kms_key_id"
- )
+ encryption_at_rest_config["KmsKeyId"] = encryption_at_rest_opts.get("kms_key_id")
if (
current_domain_config is not None
- and current_domain_config["EncryptionAtRestOptions"]
- != encryption_at_rest_config
+ and current_domain_config["EncryptionAtRestOptions"] != encryption_at_rest_config
):
change_set.append(
- "EncryptionAtRestOptions changed from {0} to {1}".format(
- current_domain_config["EncryptionAtRestOptions"],
- encryption_at_rest_config,
- )
+ f"EncryptionAtRestOptions changed from {current_domain_config['EncryptionAtRestOptions']} to"
+ f" {encryption_at_rest_config}"
)
changed = True
return changed
-def set_node_to_node_encryption_options(
- module, current_domain_config, desired_domain_config, change_set
-):
+def set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, change_set):
changed = False
- node_to_node_encryption_config = desired_domain_config[
- "NodeToNodeEncryptionOptions"
- ]
+ node_to_node_encryption_config = desired_domain_config["NodeToNodeEncryptionOptions"]
node_to_node_encryption_opts = module.params.get("node_to_node_encryption_options")
if node_to_node_encryption_opts is None:
return changed
if node_to_node_encryption_opts.get("enabled") is not None:
- node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get(
- "enabled"
- )
+ node_to_node_encryption_config["Enabled"] = node_to_node_encryption_opts.get("enabled")
if (
current_domain_config is not None
- and current_domain_config["NodeToNodeEncryptionOptions"]
- != node_to_node_encryption_config
+ and current_domain_config["NodeToNodeEncryptionOptions"] != node_to_node_encryption_config
):
change_set.append(
- "NodeToNodeEncryptionOptions changed from {0} to {1}".format(
- current_domain_config["NodeToNodeEncryptionOptions"],
- node_to_node_encryption_config,
- )
+ f"NodeToNodeEncryptionOptions changed from {current_domain_config['NodeToNodeEncryptionOptions']} to"
+ f" {node_to_node_encryption_config}"
)
changed = True
return changed
@@ -846,53 +792,36 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change
pass
else:
# Note the subnets may be the same but be listed in a different order.
- if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(
- vpc_config["SubnetIds"]
- ):
+ if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(vpc_config["SubnetIds"]):
change_set.append(
- "SubnetIds changed from {0} to {1}".format(
- current_domain_config["VPCOptions"]["SubnetIds"],
- vpc_config["SubnetIds"],
- )
+ f"SubnetIds changed from {current_domain_config['VPCOptions']['SubnetIds']} to"
+ f" {vpc_config['SubnetIds']}"
)
changed = True
- if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(
- vpc_config["SecurityGroupIds"]
- ):
+ if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(vpc_config["SecurityGroupIds"]):
change_set.append(
- "SecurityGroup changed from {0} to {1}".format(
- current_domain_config["VPCOptions"]["SecurityGroupIds"],
- vpc_config["SecurityGroupIds"],
- )
+ f"SecurityGroup changed from {current_domain_config['VPCOptions']['SecurityGroupIds']} to"
+ f" {vpc_config['SecurityGroupIds']}"
)
changed = True
return changed
-def set_snapshot_options(
- module, current_domain_config, desired_domain_config, change_set
-):
+def set_snapshot_options(module, current_domain_config, desired_domain_config, change_set):
changed = False
snapshot_config = desired_domain_config["SnapshotOptions"]
snapshot_opts = module.params.get("snapshot_options")
if snapshot_opts is None:
return changed
if snapshot_opts.get("automated_snapshot_start_hour") is not None:
- snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get(
- "automated_snapshot_start_hour"
- )
- if (
- current_domain_config is not None
- and current_domain_config["SnapshotOptions"] != snapshot_config
- ):
+ snapshot_config["AutomatedSnapshotStartHour"] = snapshot_opts.get("automated_snapshot_start_hour")
+ if current_domain_config is not None and current_domain_config["SnapshotOptions"] != snapshot_config:
change_set.append("SnapshotOptions changed")
changed = True
return changed
-def set_cognito_options(
- module, current_domain_config, desired_domain_config, change_set
-):
+def set_cognito_options(module, current_domain_config, desired_domain_config, change_set):
changed = False
cognito_config = desired_domain_config["CognitoOptions"]
cognito_opts = module.params.get("cognito_options")
@@ -908,28 +837,17 @@ def set_cognito_options(
if cognito_opts.get("cognito_user_pool_id") is not None:
cognito_config["UserPoolId"] = cognito_opts.get("cognito_user_pool_id")
if cognito_opts.get("cognito_identity_pool_id") is not None:
- cognito_config["IdentityPoolId"] = cognito_opts.get(
- "cognito_identity_pool_id"
- )
+ cognito_config["IdentityPoolId"] = cognito_opts.get("cognito_identity_pool_id")
if cognito_opts.get("cognito_role_arn") is not None:
cognito_config["RoleArn"] = cognito_opts.get("cognito_role_arn")
- if (
- current_domain_config is not None
- and current_domain_config["CognitoOptions"] != cognito_config
- ):
- change_set.append(
- "CognitoOptions changed from {0} to {1}".format(
- current_domain_config["CognitoOptions"], cognito_config
- )
- )
+ if current_domain_config is not None and current_domain_config["CognitoOptions"] != cognito_config:
+ change_set.append(f"CognitoOptions changed from {current_domain_config['CognitoOptions']} to {cognito_config}")
changed = True
return changed
-def set_advanced_security_options(
- module, current_domain_config, desired_domain_config, change_set
-):
+def set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set):
changed = False
advanced_security_config = desired_domain_config["AdvancedSecurityOptions"]
advanced_security_opts = module.params.get("advanced_security_options")
@@ -943,121 +861,87 @@ def set_advanced_security_options(
}
else:
if advanced_security_opts.get("internal_user_database_enabled") is not None:
- advanced_security_config[
- "InternalUserDatabaseEnabled"
- ] = advanced_security_opts.get("internal_user_database_enabled")
+ advanced_security_config["InternalUserDatabaseEnabled"] = advanced_security_opts.get(
+ "internal_user_database_enabled"
+ )
master_user_opts = advanced_security_opts.get("master_user_options")
if master_user_opts is not None:
advanced_security_config.setdefault("MasterUserOptions", {})
if master_user_opts.get("master_user_arn") is not None:
- advanced_security_config["MasterUserOptions"][
- "MasterUserARN"
- ] = master_user_opts.get("master_user_arn")
+ advanced_security_config["MasterUserOptions"]["MasterUserARN"] = master_user_opts.get("master_user_arn")
if master_user_opts.get("master_user_name") is not None:
- advanced_security_config["MasterUserOptions"][
- "MasterUserName"
- ] = master_user_opts.get("master_user_name")
+ advanced_security_config["MasterUserOptions"]["MasterUserName"] = master_user_opts.get(
+ "master_user_name"
+ )
if master_user_opts.get("master_user_password") is not None:
- advanced_security_config["MasterUserOptions"][
- "MasterUserPassword"
- ] = master_user_opts.get("master_user_password")
+ advanced_security_config["MasterUserOptions"]["MasterUserPassword"] = master_user_opts.get(
+ "master_user_password"
+ )
saml_opts = advanced_security_opts.get("saml_options")
if saml_opts is not None:
if saml_opts.get("enabled") is not None:
- advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get(
- "enabled"
- )
+ advanced_security_config["SamlOptions"]["Enabled"] = saml_opts.get("enabled")
idp_opts = saml_opts.get("idp")
if idp_opts is not None:
if idp_opts.get("metadata_content") is not None:
- advanced_security_config["SamlOptions"]["Idp"][
- "MetadataContent"
- ] = idp_opts.get("metadata_content")
+ advanced_security_config["SamlOptions"]["Idp"]["MetadataContent"] = idp_opts.get("metadata_content")
if idp_opts.get("entity_id") is not None:
- advanced_security_config["SamlOptions"]["Idp"][
- "EntityId"
- ] = idp_opts.get("entity_id")
+ advanced_security_config["SamlOptions"]["Idp"]["EntityId"] = idp_opts.get("entity_id")
if saml_opts.get("master_user_name") is not None:
- advanced_security_config["SamlOptions"][
- "MasterUserName"
- ] = saml_opts.get("master_user_name")
+ advanced_security_config["SamlOptions"]["MasterUserName"] = saml_opts.get("master_user_name")
if saml_opts.get("master_backend_role") is not None:
- advanced_security_config["SamlOptions"][
- "MasterBackendRole"
- ] = saml_opts.get("master_backend_role")
+ advanced_security_config["SamlOptions"]["MasterBackendRole"] = saml_opts.get("master_backend_role")
if saml_opts.get("subject_key") is not None:
- advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get(
- "subject_key"
- )
+ advanced_security_config["SamlOptions"]["SubjectKey"] = saml_opts.get("subject_key")
if saml_opts.get("roles_key") is not None:
- advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get(
- "roles_key"
- )
+ advanced_security_config["SamlOptions"]["RolesKey"] = saml_opts.get("roles_key")
if saml_opts.get("session_timeout_minutes") is not None:
- advanced_security_config["SamlOptions"][
- "SessionTimeoutMinutes"
- ] = saml_opts.get("session_timeout_minutes")
+ advanced_security_config["SamlOptions"]["SessionTimeoutMinutes"] = saml_opts.get(
+ "session_timeout_minutes"
+ )
if (
current_domain_config is not None
and current_domain_config["AdvancedSecurityOptions"] != advanced_security_config
):
change_set.append(
- "AdvancedSecurityOptions changed from {0} to {1}".format(
- current_domain_config["AdvancedSecurityOptions"],
- advanced_security_config,
- )
+ f"AdvancedSecurityOptions changed from {current_domain_config['AdvancedSecurityOptions']} to"
+ f" {advanced_security_config}"
)
changed = True
return changed
-def set_domain_endpoint_options(
- module, current_domain_config, desired_domain_config, change_set
-):
+def set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set):
changed = False
domain_endpoint_config = desired_domain_config["DomainEndpointOptions"]
domain_endpoint_opts = module.params.get("domain_endpoint_options")
if domain_endpoint_opts is None:
return changed
if domain_endpoint_opts.get("enforce_https") is not None:
- domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get(
- "enforce_https"
- )
+ domain_endpoint_config["EnforceHTTPS"] = domain_endpoint_opts.get("enforce_https")
if domain_endpoint_opts.get("tls_security_policy") is not None:
- domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get(
- "tls_security_policy"
- )
+ domain_endpoint_config["TLSSecurityPolicy"] = domain_endpoint_opts.get("tls_security_policy")
if domain_endpoint_opts.get("custom_endpoint_enabled") is not None:
- domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get(
- "custom_endpoint_enabled"
- )
+ domain_endpoint_config["CustomEndpointEnabled"] = domain_endpoint_opts.get("custom_endpoint_enabled")
if domain_endpoint_config["CustomEndpointEnabled"]:
if domain_endpoint_opts.get("custom_endpoint") is not None:
- domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get(
- "custom_endpoint"
- )
+ domain_endpoint_config["CustomEndpoint"] = domain_endpoint_opts.get("custom_endpoint")
if domain_endpoint_opts.get("custom_endpoint_certificate_arn") is not None:
- domain_endpoint_config[
- "CustomEndpointCertificateArn"
- ] = domain_endpoint_opts.get("custom_endpoint_certificate_arn")
+ domain_endpoint_config["CustomEndpointCertificateArn"] = domain_endpoint_opts.get(
+ "custom_endpoint_certificate_arn"
+ )
- if (
- current_domain_config is not None
- and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config
- ):
+ if current_domain_config is not None and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config:
change_set.append(
- "DomainEndpointOptions changed from {0} to {1}".format(
- current_domain_config["DomainEndpointOptions"], domain_endpoint_config
- )
+ f"DomainEndpointOptions changed from {current_domain_config['DomainEndpointOptions']} to"
+ f" {domain_endpoint_config}"
)
changed = True
return changed
-def set_auto_tune_options(
- module, current_domain_config, desired_domain_config, change_set
-):
+def set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set):
changed = False
auto_tune_config = desired_domain_config["AutoTuneOptions"]
auto_tune_opts = module.params.get("auto_tune_options")
@@ -1088,31 +972,20 @@ def set_auto_tune_options(
if duration_opt.get("unit") is not None:
schedule_entry["Duration"]["Unit"] = duration_opt.get("unit")
if s.get("cron_expression_for_recurrence") is not None:
- schedule_entry["CronExpressionForRecurrence"] = s.get(
- "cron_expression_for_recurrence"
- )
+ schedule_entry["CronExpressionForRecurrence"] = s.get("cron_expression_for_recurrence")
auto_tune_config["MaintenanceSchedules"].append(schedule_entry)
if current_domain_config is not None:
- if (
- current_domain_config["AutoTuneOptions"]["DesiredState"]
- != auto_tune_config["DesiredState"]
- ):
+ if current_domain_config["AutoTuneOptions"]["DesiredState"] != auto_tune_config["DesiredState"]:
change_set.append(
- "AutoTuneOptions.DesiredState changed from {0} to {1}".format(
- current_domain_config["AutoTuneOptions"]["DesiredState"],
- auto_tune_config["DesiredState"],
- )
+ "AutoTuneOptions.DesiredState changed from"
+ f" {current_domain_config['AutoTuneOptions']['DesiredState']} to {auto_tune_config['DesiredState']}"
)
changed = True
- if (
- auto_tune_config["MaintenanceSchedules"]
- != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"]
- ):
+ if auto_tune_config["MaintenanceSchedules"] != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"]:
change_set.append(
- "AutoTuneOptions.MaintenanceSchedules changed from {0} to {1}".format(
- current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"],
- auto_tune_config["MaintenanceSchedules"],
- )
+ "AutoTuneOptions.MaintenanceSchedules changed from"
+ f" {current_domain_config['AutoTuneOptions']['MaintenanceSchedules']} to"
+ f" {auto_tune_config['MaintenanceSchedules']}"
)
changed = True
return changed
@@ -1127,18 +1000,12 @@ def set_access_policy(module, current_domain_config, desired_domain_config, chan
try:
access_policy_config = json.dumps(access_policy_opt)
except Exception as e:
- module.fail_json(
- msg="Failed to convert the policy into valid JSON: %s" % str(e)
- )
+ module.fail_json(msg=f"Failed to convert the policy into valid JSON: {str(e)}")
if current_domain_config is not None:
# Updating existing domain
current_access_policy = json.loads(current_domain_config["AccessPolicies"])
if not compare_policies(current_access_policy, access_policy_opt):
- change_set.append(
- "AccessPolicy changed from {0} to {1}".format(
- current_access_policy, access_policy_opt
- )
- )
+ change_set.append(f"AccessPolicy changed from {current_access_policy} to {access_policy_opt}")
changed = True
desired_domain_config["AccessPolicies"] = access_policy_config
else:
@@ -1201,53 +1068,26 @@ def ensure_domain_present(client, module):
# Validate the engine_version
v = parse_version(module.params.get("engine_version"))
if v is None:
- module.fail_json(
- "Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y"
- )
+ module.fail_json("Invalid engine_version. Must be Elasticsearch_X.Y or OpenSearch_X.Y")
desired_domain_config["EngineVersion"] = module.params.get("engine_version")
changed = False
change_set = []  # For check mode purposes
- changed |= set_cluster_config(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_ebs_options(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_encryption_at_rest_options(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_node_to_node_encryption_options(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_vpc_options(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_snapshot_options(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_cognito_options(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_advanced_security_options(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_domain_endpoint_options(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_auto_tune_options(
- module, current_domain_config, desired_domain_config, change_set
- )
- changed |= set_access_policy(
- module, current_domain_config, desired_domain_config, change_set
- )
+ changed |= set_cluster_config(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_ebs_options(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_encryption_at_rest_options(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_node_to_node_encryption_options(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_vpc_options(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_snapshot_options(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_cognito_options(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_advanced_security_options(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_domain_endpoint_options(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_auto_tune_options(module, current_domain_config, desired_domain_config, change_set)
+ changed |= set_access_policy(module, current_domain_config, desired_domain_config, change_set)
if current_domain_config is not None:
- if (
- desired_domain_config["EngineVersion"]
- != current_domain_config["EngineVersion"]
- ):
+ if desired_domain_config["EngineVersion"] != current_domain_config["EngineVersion"]:
changed = True
change_set.append("EngineVersion changed")
upgrade_domain(
@@ -1271,22 +1111,16 @@ def ensure_domain_present(client, module):
botocore.exceptions.BotoCoreError,
botocore.exceptions.ClientError,
) as e:
- module.fail_json_aws(
- e, msg="Couldn't update domain {0}".format(domain_name)
- )
+ module.fail_json_aws(e, msg=f"Couldn't update domain {domain_name}")
else:
# Create new OpenSearch cluster
if module.params.get("access_policies") is None:
- module.fail_json(
- "state is present but the following is missing: access_policies"
- )
+ module.fail_json("state is present but the following is missing: access_policies")
changed = True
if module.check_mode:
- module.exit_json(
- changed=True, msg="Would have created a domain if not in check mode"
- )
+ module.exit_json(changed=True, msg="Would have created a domain if not in check mode")
try:
response = client.create_domain(**desired_domain_config)
domain = response["DomainStatus"]
@@ -1295,22 +1129,16 @@ def ensure_domain_present(client, module):
botocore.exceptions.BotoCoreError,
botocore.exceptions.ClientError,
) as e:
- module.fail_json_aws(
- e, msg="Couldn't update domain {0}".format(domain_name)
- )
+ module.fail_json_aws(e, msg=f"Couldn't create domain {domain_name}")
try:
- existing_tags = boto3_tag_list_to_ansible_dict(
- client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"]
- )
+ existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain_name)
+ module.fail_json_aws(e, f"Couldn't get tags for domain {domain_name}")
desired_tags = module.params["tags"]
purge_tags = module.params["purge_tags"]
- changed |= ensure_tags(
- client, module, domain_arn, existing_tags, desired_tags, purge_tags
- )
+ changed |= ensure_tags(client, module, domain_arn, existing_tags, desired_tags, purge_tags)
if module.params.get("wait") and not module.check_mode:
wait_for_domain_status(client, module, domain_name, "domain_available")
@@ -1321,7 +1149,6 @@ def ensure_domain_present(client, module):
def main():
-
module = AnsibleAWSModule(
argument_spec=dict(
state=dict(choices=["present", "absent"], default="present"),
@@ -1482,8 +1309,6 @@ def main():
supports_check_mode=True,
)
- module.require_botocore_at_least("1.21.38")
-
try:
client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff())
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
diff --git a/ansible_collections/community/aws/plugins/modules/opensearch_info.py b/ansible_collections/community/aws/plugins/modules/opensearch_info.py
index 700ad26fd..98fce3e03 100644
--- a/ansible_collections/community/aws/plugins/modules/opensearch_info.py
+++ b/ansible_collections/community/aws/plugins/modules/opensearch_info.py
@@ -1,20 +1,18 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = """
+DOCUMENTATION = r"""
---
module: opensearch_info
short_description: obtain information about one or more OpenSearch or Elasticsearch domains
description:
- Obtain information about one or more Amazon OpenSearch Service domains.
version_added: 4.0.0
-author: "Sebastien Rosset (@sebastien-rosset)"
+author:
+ - "Sebastien Rosset (@sebastien-rosset)"
options:
domain_name:
description:
@@ -28,18 +26,16 @@ options:
all tag key, value pairs.
required: false
type: dict
-requirements:
- - botocore >= 1.21.38
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get information about an OpenSearch domain instance
community.aws.opensearch_info:
- domain-name: my-search-cluster
+ domain_name: my-search-cluster
register: new_cluster_info
- name: Get all OpenSearch instances
@@ -50,9 +46,9 @@ EXAMPLES = '''
tags:
Applications: search
Environment: Development
-'''
+"""
-RETURN = '''
+RETURN = r"""
instances:
description: List of OpenSearch domain instances
returned: always
@@ -441,7 +437,7 @@ instances:
description: The name of the OpenSearch domain.
returned: always
type: str
-'''
+"""
try:
@@ -449,62 +445,63 @@ try:
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
- AWSRetry,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict,
-)
-from ansible_collections.community.aws.plugins.module_utils.opensearch import (
- get_domain_config,
- get_domain_status,
-)
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_config
+from ansible_collections.community.aws.plugins.module_utils.opensearch import get_domain_status
def domain_info(client, module):
- domain_name = module.params.get('domain_name')
- filter_tags = module.params.get('tags')
+ domain_name = module.params.get("domain_name")
+ filter_tags = module.params.get("tags")
domain_list = []
if domain_name:
domain_status = get_domain_status(client, module, domain_name)
if domain_status:
- domain_list.append({'DomainStatus': domain_status})
+ domain_list.append({"DomainStatus": domain_status})
else:
- domain_summary_list = client.list_domain_names()['DomainNames']
+ domain_summary_list = client.list_domain_names()["DomainNames"]
for d in domain_summary_list:
- domain_status = get_domain_status(client, module, d['DomainName'])
+ domain_status = get_domain_status(client, module, d["DomainName"])
if domain_status:
- domain_list.append({'DomainStatus': domain_status})
+ domain_list.append({"DomainStatus": domain_status})
# Get the domain tags
for domain in domain_list:
current_domain_tags = None
- domain_arn = domain['DomainStatus']['ARN']
+ domain_arn = domain["DomainStatus"]["ARN"]
try:
current_domain_tags = client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"]
- domain['Tags'] = boto3_tag_list_to_ansible_dict(current_domain_tags)
+ domain["Tags"] = boto3_tag_list_to_ansible_dict(current_domain_tags)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
# This could potentially happen if a domain is deleted between the time
# its domain status was queried and the tags were queried.
- domain['Tags'] = {}
+ domain["Tags"] = {}
# Filter by tags
if filter_tags:
for tag_key in filter_tags:
try:
- domain_list = [c for c in domain_list if ('Tags' in c) and (tag_key in c['Tags']) and (c['Tags'][tag_key] == filter_tags[tag_key])]
+ domain_list = [
+ c
+ for c in domain_list
+ if ("Tags" in c) and (tag_key in c["Tags"]) and (c["Tags"][tag_key] == filter_tags[tag_key])
+ ]
except (TypeError, AttributeError) as e:
module.fail_json(msg="OpenSearch tag filtering error", exception=e)
# Get the domain config
for idx, domain in enumerate(domain_list):
- domain_name = domain['DomainStatus']['DomainName']
+ domain_name = domain["DomainStatus"]["DomainName"]
(domain_config, arn) = get_domain_config(client, module, domain_name)
if domain_config:
- domain['DomainConfig'] = domain_config
- domain_list[idx] = camel_dict_to_snake_dict(domain,
- ignore_list=['AdvancedOptions', 'Endpoints', 'Tags'])
+ domain["DomainConfig"] = domain_config
+ domain_list[idx] = camel_dict_to_snake_dict(domain, ignore_list=["AdvancedOptions", "Endpoints", "Tags"])
return dict(changed=False, domains=domain_list)
@@ -513,11 +510,10 @@ def main():
module = AnsibleAWSModule(
argument_spec=dict(
domain_name=dict(required=False),
- tags=dict(type='dict', required=False),
+ tags=dict(type="dict", required=False),
),
supports_check_mode=True,
)
- module.require_botocore_at_least("1.21.38")
try:
client = module.client("opensearch", retry_decorator=AWSRetry.jittered_backoff())
@@ -527,5 +523,5 @@ def main():
module.exit_json(**domain_info(client, module))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
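A note on the tag filter in domain_info above: each requested tag key is applied as a separate pass, so a domain survives only if it carries every key with exactly the requested value. A standalone sketch of the same comprehension, with invented sample data:

domains = [
    {"Tags": {"Environment": "Development", "Applications": "search"}},
    {"Tags": {"Environment": "Production", "Applications": "search"}},
]
filter_tags = {"Environment": "Development", "Applications": "search"}
for tag_key in filter_tags:
    domains = [
        d
        for d in domains
        if ("Tags" in d) and (tag_key in d["Tags"]) and (d["Tags"][tag_key] == filter_tags[tag_key])
    ]
print(len(domains))  # 1 -- only the Development domain matches both tags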
diff --git a/ansible_collections/community/aws/plugins/modules/redshift.py b/ansible_collections/community/aws/plugins/modules/redshift.py
index 27e959893..4463722e5 100644
--- a/ansible_collections/community/aws/plugins/modules/redshift.py
+++ b/ansible_collections/community/aws/plugins/modules/redshift.py
@@ -1,14 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
@@ -170,13 +166,13 @@ options:
notes:
- Support for I(tags) and I(purge_tags) was added in release 1.3.0.
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Basic cluster provisioning example
community.aws.redshift:
command: create
@@ -191,9 +187,9 @@ EXAMPLES = r'''
identifier: new_cluster
skip_final_cluster_snapshot: true
wait: true
-'''
+"""
-RETURN = r'''
+RETURN = r"""
cluster:
description: dictionary containing all the cluster information
returned: success
@@ -257,31 +253,33 @@ cluster:
description: aws tags for cluster.
returned: success
type: dict
-'''
+"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def _ensure_tags(redshift, identifier, existing_tags, module):
"""Compares and update resource tags"""
- account_id = get_aws_account_id(module)
- region = module.params.get('region')
- resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}" .format(region, account_id, identifier)
- tags = module.params.get('tags')
- purge_tags = module.params.get('purge_tags')
+ account_id, partition = get_aws_account_info(module)
+ region = module.region
+ resource_arn = f"arn:{partition}:redshift:{region}:{account_id}:cluster:{identifier}"
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
tags_to_add, tags_to_remove = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), tags, purge_tags)
@@ -304,78 +302,77 @@ def _ensure_tags(redshift, identifier, existing_tags, module):
def _collect_facts(resource):
"""Transform cluster information to dict."""
facts = {
- 'identifier': resource['ClusterIdentifier'],
- 'status': resource['ClusterStatus'],
- 'username': resource['MasterUsername'],
- 'db_name': resource['DBName'],
- 'maintenance_window': resource['PreferredMaintenanceWindow'],
- 'enhanced_vpc_routing': resource['EnhancedVpcRouting']
-
+ "identifier": resource["ClusterIdentifier"],
+ "status": resource["ClusterStatus"],
+ "username": resource["MasterUsername"],
+ "db_name": resource["DBName"],
+ "maintenance_window": resource["PreferredMaintenanceWindow"],
+ "enhanced_vpc_routing": resource["EnhancedVpcRouting"],
}
- for node in resource['ClusterNodes']:
- if node['NodeRole'] in ('SHARED', 'LEADER'):
- facts['private_ip_address'] = node['PrivateIPAddress']
- if facts['enhanced_vpc_routing'] is False:
- facts['public_ip_address'] = node['PublicIPAddress']
+ for node in resource["ClusterNodes"]:
+ if node["NodeRole"] in ("SHARED", "LEADER"):
+ facts["private_ip_address"] = node["PrivateIPAddress"]
+ if facts["enhanced_vpc_routing"] is False:
+ facts["public_ip_address"] = node["PublicIPAddress"]
else:
- facts['public_ip_address'] = None
+ facts["public_ip_address"] = None
break
# Some parameters are not ready instantly if you don't wait for the
# cluster to reach the "available" status
- facts['create_time'] = None
- facts['url'] = None
- facts['port'] = None
- facts['availability_zone'] = None
- facts['tags'] = {}
-
- if resource['ClusterStatus'] != "creating":
- facts['create_time'] = resource['ClusterCreateTime']
- facts['url'] = resource['Endpoint']['Address']
- facts['port'] = resource['Endpoint']['Port']
- facts['availability_zone'] = resource['AvailabilityZone']
- facts['tags'] = boto3_tag_list_to_ansible_dict(resource['Tags'])
+ facts["create_time"] = None
+ facts["url"] = None
+ facts["port"] = None
+ facts["availability_zone"] = None
+ facts["tags"] = {}
+
+ if resource["ClusterStatus"] != "creating":
+ facts["create_time"] = resource["ClusterCreateTime"]
+ facts["url"] = resource["Endpoint"]["Address"]
+ facts["port"] = resource["Endpoint"]["Port"]
+ facts["availability_zone"] = resource["AvailabilityZone"]
+ facts["tags"] = boto3_tag_list_to_ansible_dict(resource["Tags"])
return facts
@AWSRetry.jittered_backoff()
def _describe_cluster(redshift, identifier):
- '''
+ """
Basic wrapper around describe_clusters with a retry applied
- '''
- return redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
+ """
+ return redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0]
@AWSRetry.jittered_backoff()
def _create_cluster(redshift, **kwargs):
- '''
+ """
Basic wrapper around create_cluster with a retry applied
- '''
+ """
return redshift.create_cluster(**kwargs)
# Simple wrapper around delete; try to avoid throwing an error if some other
# operation is in progress
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState'])
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"])
def _delete_cluster(redshift, **kwargs):
- '''
+ """
Basic wrapper around delete_cluster with a retry applied.
Explicitly catches 'InvalidClusterState' (~ Operation in progress) so that
we can still delete a cluster if some kind of change operation was in
progress.
- '''
+ """
return redshift.delete_cluster(**kwargs)
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState'])
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"])
def _modify_cluster(redshift, **kwargs):
- '''
+ """
Basic wrapper around modify_cluster with a retry applied.
Explicitly catches 'InvalidClusterState' (~ Operation in progress) for cases
where another modification is still in progress
- '''
+ """
return redshift.modify_cluster(**kwargs)
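These thin wrappers exist so a retry decorator can be attached to a single boto3 call, and catch_extra_error_codes widens the retryable set beyond the default throttling errors. A minimal sketch of the same pattern; the wrapper below is hypothetical and not part of this module:

from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

@AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidClusterState"])
def _reboot_cluster(redshift, **kwargs):
    """
    Hypothetical wrapper: retried with jittered backoff, and also retried
    when the cluster is mid-operation ('InvalidClusterState').
    """
    return redshift.reboot_cluster(**kwargs)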
@@ -389,59 +386,71 @@ def create_cluster(module, redshift):
Returns:
"""
- identifier = module.params.get('identifier')
- node_type = module.params.get('node_type')
- username = module.params.get('username')
- password = module.params.get('password')
- d_b_name = module.params.get('db_name')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- tags = module.params.get('tags')
+ identifier = module.params.get("identifier")
+ node_type = module.params.get("node_type")
+ username = module.params.get("username")
+ password = module.params.get("password")
+ d_b_name = module.params.get("db_name")
+ wait = module.params.get("wait")
+ wait_timeout = module.params.get("wait_timeout")
+ tags = module.params.get("tags")
changed = True
# Package up the optional parameters
params = {}
- for p in ('cluster_type', 'cluster_security_groups',
- 'vpc_security_group_ids', 'cluster_subnet_group_name',
- 'availability_zone', 'preferred_maintenance_window',
- 'cluster_parameter_group_name',
- 'automated_snapshot_retention_period', 'port',
- 'cluster_version', 'allow_version_upgrade',
- 'number_of_nodes', 'publicly_accessible', 'encrypted',
- 'elastic_ip', 'enhanced_vpc_routing'):
+ for p in (
+ "cluster_type",
+ "cluster_security_groups",
+ "vpc_security_group_ids",
+ "cluster_subnet_group_name",
+ "availability_zone",
+ "preferred_maintenance_window",
+ "cluster_parameter_group_name",
+ "automated_snapshot_retention_period",
+ "port",
+ "cluster_version",
+ "allow_version_upgrade",
+ "number_of_nodes",
+ "publicly_accessible",
+ "encrypted",
+ "elastic_ip",
+ "enhanced_vpc_routing",
+ ):
# https://github.com/boto/boto3/issues/400
if module.params.get(p) is not None:
params[p] = module.params.get(p)
if d_b_name:
- params['d_b_name'] = d_b_name
+ params["d_b_name"] = d_b_name
if tags:
tags = ansible_dict_to_boto3_tag_list(tags)
- params['tags'] = tags
+ params["tags"] = tags
try:
_describe_cluster(redshift, identifier)
changed = False
- except is_boto3_error_code('ClusterNotFound'):
+ except is_boto3_error_code("ClusterNotFound"):
try:
- _create_cluster(redshift,
- ClusterIdentifier=identifier,
- NodeType=node_type,
- MasterUsername=username,
- MasterUserPassword=password,
- **snake_dict_to_camel_dict(params, capitalize_first=True))
+ _create_cluster(
+ redshift,
+ ClusterIdentifier=identifier,
+ NodeType=node_type,
+ MasterUsername=username,
+ MasterUserPassword=password,
+ **snake_dict_to_camel_dict(params, capitalize_first=True),
+ )
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to create cluster")
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to describe cluster")
if wait:
attempts = wait_timeout // 60
- waiter = redshift.get_waiter('cluster_available')
+ waiter = redshift.get_waiter("cluster_available")
try:
- waiter.wait(
- ClusterIdentifier=identifier,
- WaiterConfig=dict(MaxAttempts=attempts)
- )
+ waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Timeout waiting for the cluster creation")
try:
@@ -450,7 +459,7 @@ def create_cluster(module, redshift):
module.fail_json_aws(e, msg="Failed to describe cluster")
if tags:
- if _ensure_tags(redshift, identifier, resource['Tags'], module):
+ if _ensure_tags(redshift, identifier, resource["Tags"], module):
changed = True
resource = _describe_cluster(redshift, identifier)
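The create and modify paths both build their keyword arguments the same way because of the boto3 issue linked above: boto3 rejects explicit None values, so only parameters the user actually set are copied, then camelized for the AWS API. A standalone sketch with invented parameter values:

from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

module_params = {"cluster_type": "single-node", "number_of_nodes": None, "port": 5439}
params = {}
for p in ("cluster_type", "number_of_nodes", "port"):
    # drop unset parameters entirely instead of passing None to boto3
    if module_params.get(p) is not None:
        params[p] = module_params.get(p)
print(snake_dict_to_camel_dict(params, capitalize_first=True))
# {'ClusterType': 'single-node', 'Port': 5439}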
@@ -464,7 +473,7 @@ def describe_cluster(module, redshift):
module: Ansible module object
redshift: authenticated redshift connection object
"""
- identifier = module.params.get('identifier')
+ identifier = module.params.get("identifier")
try:
resource = _describe_cluster(redshift, identifier)
@@ -482,13 +491,12 @@ def delete_cluster(module, redshift):
redshift: authenticated redshift connection object
"""
- identifier = module.params.get('identifier')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
+ identifier = module.params.get("identifier")
+ wait = module.params.get("wait")
+ wait_timeout = module.params.get("wait_timeout")
params = {}
- for p in ('skip_final_cluster_snapshot',
- 'final_cluster_snapshot_identifier'):
+ for p in ("skip_final_cluster_snapshot", "final_cluster_snapshot_identifier"):
if p in module.params:
# https://github.com/boto/boto3/issues/400
if module.params.get(p) is not None:
@@ -496,22 +504,21 @@ def delete_cluster(module, redshift):
try:
_delete_cluster(
- redshift,
- ClusterIdentifier=identifier,
- **snake_dict_to_camel_dict(params, capitalize_first=True))
- except is_boto3_error_code('ClusterNotFound'):
+ redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True)
+ )
+ except is_boto3_error_code("ClusterNotFound"):
return False, {}
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to delete cluster")
if wait:
attempts = wait_timeout // 60
- waiter = redshift.get_waiter('cluster_deleted')
+ waiter = redshift.get_waiter("cluster_deleted")
try:
- waiter.wait(
- ClusterIdentifier=identifier,
- WaiterConfig=dict(MaxAttempts=attempts)
- )
+ waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Timeout deleting the cluster")
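Throughout this module, wait_timeout is translated into a waiter attempt count with wait_timeout // 60, which assumes the Redshift waiters' default polling delay of roughly 60 seconds. A minimal sketch of the same conversion using a plain boto3 client (the cluster name is invented):

import boto3

redshift = boto3.client("redshift")
wait_timeout = 300
attempts = wait_timeout // 60  # 5 polls at ~60-second intervals is about 300 seconds
waiter = redshift.get_waiter("cluster_deleted")
waiter.wait(ClusterIdentifier="my-cluster", WaiterConfig=dict(MaxAttempts=attempts))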
@@ -526,148 +533,160 @@ def modify_cluster(module, redshift):
redshift: authenticated redshift connection object
"""
- identifier = module.params.get('identifier')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
+ identifier = module.params.get("identifier")
+ wait = module.params.get("wait")
+ wait_timeout = module.params.get("wait_timeout")
# Package up the optional parameters
params = {}
- for p in ('cluster_type', 'cluster_security_groups',
- 'vpc_security_group_ids', 'cluster_subnet_group_name',
- 'availability_zone', 'preferred_maintenance_window',
- 'cluster_parameter_group_name',
- 'automated_snapshot_retention_period', 'port', 'cluster_version',
- 'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
+ for p in (
+ "cluster_type",
+ "cluster_security_groups",
+ "vpc_security_group_ids",
+ "cluster_subnet_group_name",
+ "availability_zone",
+ "preferred_maintenance_window",
+ "cluster_parameter_group_name",
+ "automated_snapshot_retention_period",
+ "port",
+ "cluster_version",
+ "allow_version_upgrade",
+ "number_of_nodes",
+ "new_cluster_identifier",
+ ):
# https://github.com/boto/boto3/issues/400
if module.params.get(p) is not None:
params[p] = module.params.get(p)
# enhanced_vpc_routing parameter change needs an exclusive request
- if module.params.get('enhanced_vpc_routing') is not None:
+ if module.params.get("enhanced_vpc_routing") is not None:
try:
_modify_cluster(
- redshift,
- ClusterIdentifier=identifier,
- EnhancedVpcRouting=module.params.get('enhanced_vpc_routing'))
+ redshift, ClusterIdentifier=identifier, EnhancedVpcRouting=module.params.get("enhanced_vpc_routing")
+ )
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
+ module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier}")
if wait:
attempts = wait_timeout // 60
- waiter = redshift.get_waiter('cluster_available')
+ waiter = redshift.get_waiter("cluster_available")
try:
- waiter.wait(
- ClusterIdentifier=identifier,
- WaiterConfig=dict(MaxAttempts=attempts))
+ waiter.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e,
- msg="Timeout waiting for cluster enhanced vpc routing modification")
+ module.fail_json_aws(e, msg="Timeout waiting for cluster enhanced vpc routing modification")
# change the rest
try:
_modify_cluster(
- redshift,
- ClusterIdentifier=identifier,
- **snake_dict_to_camel_dict(params, capitalize_first=True))
+ redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True)
+ )
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
+ module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier}")
- if module.params.get('new_cluster_identifier'):
- identifier = module.params.get('new_cluster_identifier')
+ if module.params.get("new_cluster_identifier"):
+ identifier = module.params.get("new_cluster_identifier")
if wait:
attempts = wait_timeout // 60
- waiter2 = redshift.get_waiter('cluster_available')
+ waiter2 = redshift.get_waiter("cluster_available")
try:
- waiter2.wait(
- ClusterIdentifier=identifier,
- WaiterConfig=dict(MaxAttempts=attempts)
- )
+ waiter2.wait(ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Timeout waiting for cluster modification")
try:
resource = _describe_cluster(redshift, identifier)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
+ module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier}")
- if _ensure_tags(redshift, identifier, resource['Tags'], module):
- resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
+ if _ensure_tags(redshift, identifier, resource["Tags"], module):
+ resource = redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0]
return True, _collect_facts(resource)
def main():
argument_spec = dict(
- command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
+ command=dict(choices=["create", "facts", "delete", "modify"], required=True),
identifier=dict(required=True),
- node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
- 'ds2.8xlarge', 'dc1.large', 'dc2.large',
- 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge',
- 'dw2.large', 'dw2.8xlarge'], required=False),
+ node_type=dict(
+ choices=[
+ "ds1.xlarge",
+ "ds1.8xlarge",
+ "ds2.xlarge",
+ "ds2.8xlarge",
+ "dc1.large",
+ "dc2.large",
+ "dc1.8xlarge",
+ "dw1.xlarge",
+ "dw1.8xlarge",
+ "dw2.large",
+ "dw2.8xlarge",
+ ],
+ required=False,
+ ),
username=dict(required=False),
password=dict(no_log=True, required=False),
db_name=dict(required=False),
- cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'),
- cluster_security_groups=dict(aliases=['security_groups'], type='list', elements='str'),
- vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list', elements='str'),
- skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
- type='bool', default=False),
- final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
- cluster_subnet_group_name=dict(aliases=['subnet']),
- availability_zone=dict(aliases=['aws_zone', 'zone']),
- preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
- cluster_parameter_group_name=dict(aliases=['param_group_name']),
- automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'),
- port=dict(type='int'),
- cluster_version=dict(aliases=['version'], choices=['1.0']),
- allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
- number_of_nodes=dict(type='int'),
- publicly_accessible=dict(type='bool', default=False),
- encrypted=dict(type='bool', default=False),
+ cluster_type=dict(choices=["multi-node", "single-node"], default="single-node"),
+ cluster_security_groups=dict(aliases=["security_groups"], type="list", elements="str"),
+ vpc_security_group_ids=dict(aliases=["vpc_security_groups"], type="list", elements="str"),
+ skip_final_cluster_snapshot=dict(aliases=["skip_final_snapshot"], type="bool", default=False),
+ final_cluster_snapshot_identifier=dict(aliases=["final_snapshot_id"], required=False),
+ cluster_subnet_group_name=dict(aliases=["subnet"]),
+ availability_zone=dict(aliases=["aws_zone", "zone"]),
+ preferred_maintenance_window=dict(aliases=["maintance_window", "maint_window"]),
+ cluster_parameter_group_name=dict(aliases=["param_group_name"]),
+ automated_snapshot_retention_period=dict(aliases=["retention_period"], type="int"),
+ port=dict(type="int"),
+ cluster_version=dict(aliases=["version"], choices=["1.0"]),
+ allow_version_upgrade=dict(aliases=["version_upgrade"], type="bool", default=True),
+ number_of_nodes=dict(type="int"),
+ publicly_accessible=dict(type="bool", default=False),
+ encrypted=dict(type="bool", default=False),
elastic_ip=dict(required=False),
- new_cluster_identifier=dict(aliases=['new_identifier']),
- enhanced_vpc_routing=dict(type='bool', default=False),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True)
+ new_cluster_identifier=dict(aliases=["new_identifier"]),
+ enhanced_vpc_routing=dict(type="bool", default=False),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
)
required_if = [
- ('command', 'delete', ['skip_final_cluster_snapshot']),
- ('command', 'create', ['node_type',
- 'username',
- 'password'])
+ ("command", "delete", ["skip_final_cluster_snapshot"]),
+ ("command", "create", ["node_type", "username", "password"]),
]
module = AnsibleAWSModule(
argument_spec=argument_spec,
- required_if=required_if
+ required_if=required_if,
)
- command = module.params.get('command')
- skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
- final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
+ command = module.params.get("command")
+ skip_final_cluster_snapshot = module.params.get("skip_final_cluster_snapshot")
+ final_cluster_snapshot_identifier = module.params.get("final_cluster_snapshot_identifier")
# can't use module basic required_if check for this case
- if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
- module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False")
+ if command == "delete" and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
+ module.fail_json(
+ msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False"
+ )
- conn = module.client('redshift')
+ conn = module.client("redshift")
changed = True
- if command == 'create':
+ if command == "create":
(changed, cluster) = create_cluster(module, conn)
- elif command == 'facts':
+ elif command == "facts":
(changed, cluster) = describe_cluster(module, conn)
- elif command == 'delete':
+ elif command == "delete":
(changed, cluster) = delete_cluster(module, conn)
- elif command == 'modify':
+ elif command == "modify":
(changed, cluster) = modify_cluster(module, conn)
module.exit_json(changed=changed, cluster=cluster)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py b/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py
index 1c42ea802..d2894dfcb 100644
--- a/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py
+++ b/ansible_collections/community/aws/plugins/modules/redshift_cross_region_snapshots.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2018, JR Kerkstra <jrkerkstra@example.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: redshift_cross_region_snapshots
version_added: 1.0.0
@@ -15,7 +12,8 @@ short_description: Manage Redshift Cross Region Snapshots
description:
- Manage Redshift Cross Region Snapshots. Supports KMS-Encrypted Snapshots.
- For more information, see U(https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html#cross-region-snapshot-copy)
-author: JR Kerkstra (@captainkerk)
+author:
+ - JR Kerkstra (@captainkerk)
options:
cluster_name:
description:
@@ -54,13 +52,12 @@ options:
aliases: [ "retention_period" ]
type: int
extends_documentation_fragment:
-- amazon.aws.ec2
-- amazon.aws.aws
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.region.modules
+ - amazon.aws.common.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: configure cross-region snapshot on cluster `johniscool`
community.aws.redshift_cross_region_snapshots:
cluster_name: johniscool
@@ -84,24 +81,21 @@ EXAMPLES = '''
state: absent
region: us-east-1
destination_region: us-west-2
-'''
+"""
-RETURN = ''' # '''
+RETURN = r""" # """
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class SnapshotController(object):
-
def __init__(self, client, cluster_name):
self.client = client
self.cluster_name = cluster_name
def get_cluster_snapshot_copy_status(self):
- response = self.client.describe_clusters(
- ClusterIdentifier=self.cluster_name
- )
- return response['Clusters'][0].get('ClusterSnapshotCopyStatus')
+ response = self.client.describe_clusters(ClusterIdentifier=self.cluster_name)
+ return response["Clusters"][0].get("ClusterSnapshotCopyStatus")
def enable_snapshot_copy(self, destination_region, grant_name, retention_period):
if grant_name:
@@ -119,78 +113,79 @@ class SnapshotController(object):
)
def disable_snapshot_copy(self):
- self.client.disable_snapshot_copy(
- ClusterIdentifier=self.cluster_name
- )
+ self.client.disable_snapshot_copy(ClusterIdentifier=self.cluster_name)
def modify_snapshot_copy_retention_period(self, retention_period):
self.client.modify_snapshot_copy_retention_period(
- ClusterIdentifier=self.cluster_name,
- RetentionPeriod=retention_period
+ ClusterIdentifier=self.cluster_name, RetentionPeriod=retention_period
)
def requesting_unsupported_modifications(actual, requested):
- if (actual['SnapshotCopyGrantName'] != requested['snapshot_copy_grant'] or
- actual['DestinationRegion'] != requested['destination_region']):
+ if (
+ actual["SnapshotCopyGrantName"] != requested["snapshot_copy_grant"]
+ or actual["DestinationRegion"] != requested["destination_region"]
+ ):
return True
return False
def needs_update(actual, requested):
- if actual['RetentionPeriod'] != requested['snapshot_retention_period']:
+ if actual["RetentionPeriod"] != requested["snapshot_retention_period"]:
return True
return False
def run_module():
argument_spec = dict(
- cluster_name=dict(type='str', required=True, aliases=['cluster']),
- state=dict(type='str', choices=['present', 'absent'], default='present'),
- region=dict(type='str', required=True, aliases=['source']),
- destination_region=dict(type='str', required=True, aliases=['destination']),
- snapshot_copy_grant=dict(type='str', aliases=['copy_grant']),
- snapshot_retention_period=dict(type='int', required=True, aliases=['retention_period']),
+ cluster_name=dict(type="str", required=True, aliases=["cluster"]),
+ state=dict(type="str", choices=["present", "absent"], default="present"),
+ region=dict(type="str", required=True, aliases=["source"]),
+ destination_region=dict(type="str", required=True, aliases=["destination"]),
+ snapshot_copy_grant=dict(type="str", aliases=["copy_grant"]),
+ snapshot_retention_period=dict(type="int", required=True, aliases=["retention_period"]),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- supports_check_mode=True
+ supports_check_mode=True,
)
result = dict(
changed=False,
- message=''
+ message="",
)
- connection = module.client('redshift')
+ connection = module.client("redshift")
- snapshot_controller = SnapshotController(client=connection,
- cluster_name=module.params.get('cluster_name'))
+ snapshot_controller = SnapshotController(client=connection, cluster_name=module.params.get("cluster_name"))
current_config = snapshot_controller.get_cluster_snapshot_copy_status()
if current_config is not None:
- if module.params.get('state') == 'present':
+ if module.params.get("state") == "present":
if requesting_unsupported_modifications(current_config, module.params):
- message = 'Cannot modify destination_region or grant_name. ' \
- 'Please disable cross-region snapshots, and re-run.'
+ message = (
+ "Cannot modify destination_region or grant_name. Please disable cross-region snapshots, and re-run."
+ )
module.fail_json(msg=message, **result)
if needs_update(current_config, module.params):
- result['changed'] = True
+ result["changed"] = True
if not module.check_mode:
snapshot_controller.modify_snapshot_copy_retention_period(
- module.params.get('snapshot_retention_period')
+ module.params.get("snapshot_retention_period")
)
else:
- result['changed'] = True
+ result["changed"] = True
if not module.check_mode:
snapshot_controller.disable_snapshot_copy()
else:
- if module.params.get('state') == 'present':
- result['changed'] = True
+ if module.params.get("state") == "present":
+ result["changed"] = True
if not module.check_mode:
- snapshot_controller.enable_snapshot_copy(module.params.get('destination_region'),
- module.params.get('snapshot_copy_grant'),
- module.params.get('snapshot_retention_period'))
+ snapshot_controller.enable_snapshot_copy(
+ module.params.get("destination_region"),
+ module.params.get("snapshot_copy_grant"),
+ module.params.get("snapshot_retention_period"),
+ )
module.exit_json(**result)
@@ -198,5 +193,5 @@ def main():
run_module()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/redshift_info.py b/ansible_collections/community/aws/plugins/modules/redshift_info.py
index ff4da774e..2a346167e 100644
--- a/ansible_collections/community/aws/plugins/modules/redshift_info.py
+++ b/ansible_collections/community/aws/plugins/modules/redshift_info.py
@@ -1,17 +1,15 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: redshift_info
version_added: 1.0.0
-author: "Jens Carl (@j-carl)"
+author:
+ - "Jens Carl (@j-carl)"
short_description: Gather information about Redshift cluster(s)
description:
- Gather information about Redshift cluster(s).
@@ -30,13 +28,12 @@ options:
required: false
type: dict
extends_documentation_fragment:
-- amazon.aws.ec2
-- amazon.aws.aws
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.region.modules
+ - amazon.aws.common.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS guide for details.
- name: Find all clusters
@@ -65,9 +62,9 @@ EXAMPLES = '''
stack: db
register: redshift_user
failed_when: "{{ redshift_user.results | length == 0 }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
# For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters)
---
cluster_identifier:
@@ -273,46 +270,46 @@ iam_roles:
returned: success
type: list
sample: []
-'''
+"""
import re
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def match_tags(tags_to_match, cluster):
for key, value in tags_to_match.items():
- for tag in cluster['Tags']:
- if key == tag['Key'] and value == tag['Value']:
+ for tag in cluster["Tags"]:
+ if key == tag["Key"] and value == tag["Value"]:
return True
return False
def find_clusters(conn, module, identifier=None, tags=None):
-
try:
- cluster_paginator = conn.get_paginator('describe_clusters')
+ cluster_paginator = conn.get_paginator("describe_clusters")
clusters = cluster_paginator.paginate().build_full_result()
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to fetch clusters.')
+ module.fail_json_aws(e, msg="Failed to fetch clusters.")
matched_clusters = []
if identifier is not None:
- identifier_prog = re.compile('^' + identifier)
-
- for cluster in clusters['Clusters']:
+ identifier_prog = re.compile("^" + identifier)
+ for cluster in clusters["Clusters"]:
matched_identifier = True
if identifier:
- matched_identifier = identifier_prog.search(cluster['ClusterIdentifier'])
+ matched_identifier = identifier_prog.search(cluster["ClusterIdentifier"])
matched_tags = True
if tags:
@@ -325,24 +322,23 @@ def find_clusters(conn, module, identifier=None, tags=None):
def main():
-
argument_spec = dict(
- cluster_identifier=dict(type='str', aliases=['identifier', 'name']),
- tags=dict(type='dict')
+ cluster_identifier=dict(type="str", aliases=["identifier", "name"]),
+ tags=dict(type="dict"),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
- supports_check_mode=True
+ supports_check_mode=True,
)
- cluster_identifier = module.params.get('cluster_identifier')
- cluster_tags = module.params.get('tags')
+ cluster_identifier = module.params.get("cluster_identifier")
+ cluster_tags = module.params.get("tags")
- redshift = module.client('redshift')
+ redshift = module.client("redshift")
results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags)
module.exit_json(results=results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
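One subtlety in find_clusters above: match_tags returns True as soon as any single requested key/value pair is present on the cluster, so a multi-tag filter behaves as an OR rather than an AND. A quick self-contained illustration with invented data:

def match_tags(tags_to_match, cluster):
    for key, value in tags_to_match.items():
        for tag in cluster["Tags"]:
            if key == tag["Key"] and value == tag["Value"]:
                return True
    return False

cluster = {"Tags": [{"Key": "env", "Value": "dev"}]}
print(match_tags({"env": "dev", "team": "data"}, cluster))  # True - one matching pair suffices
print(match_tags({"team": "data"}, cluster))                # False - no pair matches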
diff --git a/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py b/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py
index 3c7ca31f5..2ae3a2405 100644
--- a/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py
+++ b/ansible_collections/community/aws/plugins/modules/redshift_subnet_group.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: redshift_subnet_group
version_added: 1.0.0
@@ -40,30 +37,30 @@ options:
type: list
elements: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create a Redshift subnet group
community.aws.redshift_subnet_group:
state: present
group_name: redshift-subnet
group_description: Redshift subnet
group_subnets:
- - 'subnet-aaaaa'
- - 'subnet-bbbbb'
+ - 'subnet-aaaaa'
+ - 'subnet-bbbbb'
- name: Remove subnet group
community.aws.redshift_subnet_group:
state: absent
group_name: redshift-subnet
-'''
+"""
-RETURN = r'''
+RETURN = r"""
cluster_subnet_group:
description: A dictionary containing information about the Redshift subnet group.
returned: success
@@ -92,7 +89,7 @@ cluster_subnet_group:
sample:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
-'''
+"""
try:
import botocore
@@ -101,10 +98,11 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_subnet_group(name):
@@ -112,10 +110,13 @@ def get_subnet_group(name):
groups = client.describe_cluster_subnet_groups(
aws_retry=True,
ClusterSubnetGroupName=name,
- )['ClusterSubnetGroups']
- except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'):
+ )["ClusterSubnetGroups"]
+ except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"):
return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to describe subnet group")
if not groups:
@@ -129,23 +130,22 @@ def get_subnet_group(name):
# No support for managing tags yet, but make sure that we won't need to
# change the return value structure once it has shipped in a release.
- tags = boto3_tag_list_to_ansible_dict(groups[0]['Tags'])
+ tags = boto3_tag_list_to_ansible_dict(groups[0]["Tags"])
subnet_group = camel_dict_to_snake_dict(groups[0])
- subnet_group['tags'] = tags
- subnet_group['name'] = subnet_group['cluster_subnet_group_name']
+ subnet_group["tags"] = tags
+ subnet_group["name"] = subnet_group["cluster_subnet_group_name"]
- subnet_ids = list(s['subnet_identifier'] for s in subnet_group['subnets'])
- subnet_group['subnet_ids'] = subnet_ids
+ subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"])
+ subnet_group["subnet_ids"] = subnet_ids
return subnet_group
def create_subnet_group(name, description, subnets):
-
if not subnets:
- module.fail_json(msg='At least one subnet must be provided when creating a subnet group')
+ module.fail_json(msg="At least one subnet must be provided when creating a subnet group")
if module.check_mode:
return True
@@ -166,13 +166,13 @@ def create_subnet_group(name, description, subnets):
def update_subnet_group(subnet_group, name, description, subnets):
update_params = dict()
- if description and subnet_group['description'] != description:
- update_params['Description'] = description
+ if description and subnet_group["description"] != description:
+ update_params["Description"] = description
if subnets:
- old_subnets = set(subnet_group['subnet_ids'])
+ old_subnets = set(subnet_group["subnet_ids"])
new_subnets = set(subnets)
if old_subnets != new_subnets:
- update_params['SubnetIds'] = list(subnets)
+ update_params["SubnetIds"] = list(subnets)
if not update_params:
return False
@@ -181,8 +181,8 @@ def update_subnet_group(subnet_group, name, description, subnets):
return True
# Description is optional, SubnetIds is not
- if 'SubnetIds' not in update_params:
- update_params['SubnetIds'] = subnet_group['subnet_ids']
+ if "SubnetIds" not in update_params:
+ update_params["SubnetIds"] = subnet_group["subnet_ids"]
try:
client.modify_cluster_subnet_group(
@@ -197,7 +197,6 @@ def update_subnet_group(subnet_group, name, description, subnets):
def delete_subnet_group(name):
-
if module.check_mode:
return True
@@ -207,20 +206,23 @@ def delete_subnet_group(name):
ClusterSubnetGroupName=name,
)
return True
- except is_boto3_error_code('ClusterSubnetGroupNotFoundFault'):
+ except is_boto3_error_code("ClusterSubnetGroupNotFoundFault"):
# AWS is "eventually consistent", cope with the race conditions where
# deletion hadn't completed when we ran describe
return False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to delete subnet group")
def main():
argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
- name=dict(required=True, aliases=['group_name']),
- description=dict(required=False, aliases=['group_description']),
- subnets=dict(required=False, aliases=['group_subnets'], type='list', elements='str'),
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(required=True, aliases=["group_name"]),
+ description=dict(required=False, aliases=["group_description"]),
+ subnets=dict(required=False, aliases=["group_subnets"], type="list", elements="str"),
)
global module
@@ -231,17 +233,17 @@ def main():
supports_check_mode=True,
)
- state = module.params.get('state')
- name = module.params.get('name')
- description = module.params.get('description')
- subnets = module.params.get('subnets')
+ state = module.params.get("state")
+ name = module.params.get("name")
+ description = module.params.get("description")
+ subnets = module.params.get("subnets")
- client = module.client('redshift', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("redshift", retry_decorator=AWSRetry.jittered_backoff())
subnet_group = get_subnet_group(name)
changed = False
- if state == 'present':
+ if state == "present":
if not subnet_group:
result = create_subnet_group(name, description, subnets)
changed |= result
@@ -257,9 +259,9 @@ def main():
compat_results = dict()
if subnet_group:
- compat_results['group'] = dict(
- name=subnet_group['name'],
- vpc_id=subnet_group['vpc_id'],
+ compat_results["group"] = dict(
+ name=subnet_group["name"],
+ vpc_id=subnet_group["vpc_id"],
)
module.exit_json(
@@ -269,5 +271,5 @@ def main():
)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
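A behavioral detail in update_subnet_group above: the ModifyClusterSubnetGroup call treats Description as optional but SubnetIds as required, so when only the description changes the module re-sends the existing subnet list. A standalone sketch of that payload construction with invented values:

# Existing group as returned by get_subnet_group (invented sample values).
subnet_group = {"description": "old text", "subnet_ids": ["subnet-aaaaa", "subnet-bbbbb"]}
description = "new text"
subnets = None  # caller did not ask to change the subnets

update_params = {}
if description and subnet_group["description"] != description:
    update_params["Description"] = description
if subnets and set(subnet_group["subnet_ids"]) != set(subnets):
    update_params["SubnetIds"] = list(subnets)
# Description is optional in the API call, SubnetIds is not:
if update_params and "SubnetIds" not in update_params:
    update_params["SubnetIds"] = subnet_group["subnet_ids"]
print(update_params)  # {'Description': 'new text', 'SubnetIds': ['subnet-aaaaa', 'subnet-bbbbb']}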
diff --git a/ansible_collections/community/aws/plugins/modules/route53_wait.py b/ansible_collections/community/aws/plugins/modules/route53_wait.py
new file mode 100644
index 000000000..6b72681d4
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/route53_wait.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2023, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: route53_wait
+version_added: 6.3.0
+short_description: wait for changes in Amazon's Route 53 DNS service to propagate
+description:
+ - When using M(amazon.aws.route53) with I(wait=false), this module allows you to wait for the
+ module's changes to propagate at a later point in time.
+options:
+ result:
+ aliases:
+ - results
+ description:
+ - The registered result of one or multiple M(amazon.aws.route53) invocations.
+ required: true
+ type: dict
+ wait_timeout:
+ description:
+ - How long to wait for the changes to be replicated, in seconds.
+ - This timeout will be used for every changed result in I(result).
+ default: 300
+ type: int
+ region:
+ description:
+      - This setting is ignored by the module. It exists only so that I(region) can be
+        part of the module defaults group.
+ type: str
+author:
+ - Felix Fontein (@felixfontein)
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.boto3
+"""
+
+RETURN = r"""
+#
+"""
+
+EXAMPLES = r"""
+# Example when using a single route53 invocation:
+
+- name: Add new.foo.com as an A record with 3 IPs
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ record: new.foo.com
+ type: A
+ ttl: 7200
+ value:
+ - 1.1.1.1
+ - 2.2.2.2
+ - 3.3.3.3
+ register: module_result
+
+# do something else
+
+- name: Wait for the changes of the above route53 invocation to propagate
+ community.aws.route53_wait:
+ result: "{{ module_result }}"
+
+#########################################################################
+# Example when using a loop over amazon.aws.route53:
+
+- name: Add various A records
+ amazon.aws.route53:
+ state: present
+ zone: foo.com
+ record: "{{ item.record }}"
+ type: A
+ ttl: 300
+ value: "{{ item.value }}"
+ loop:
+ - record: new.foo.com
+ value: 1.1.1.1
+ - record: foo.foo.com
+ value: 2.2.2.2
+ - record: bar.foo.com
+ value:
+ - 3.3.3.3
+ - 4.4.4.4
+ register: module_results
+
+# do something else
+
+- name: Wait for the changes of the above three route53 invocations to propagate
+ community.aws.route53_wait:
+ results: "{{ module_results }}"
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
+WAIT_RETRY = 5 # how many seconds to wait between propagation status polls
+
+
+def detect_task_results(results):
+ if "results" in results:
+ # This must be the registered result of a loop of route53 tasks
+ for key in ("changed", "msg", "skipped"):
+ if key not in results:
+ raise ValueError(f"missing {key} key")
+ if not isinstance(results["results"], list):
+ raise ValueError("results is present, but not a list")
+ for index, result in enumerate(results["results"]):
+ if not isinstance(result, dict):
+ raise ValueError(f"result {index + 1} is not a dictionary")
+ for key in ("changed", "failed", "ansible_loop_var", "invocation"):
+ if key not in result:
+ raise ValueError(f"missing {key} key for result {index + 1}")
+ yield f" for result #{index + 1}", result
+ return
+ # This must be a single route53 task
+ for key in ("changed", "failed"):
+ if key not in results:
+ raise ValueError(f"missing {key} key")
+ yield "", results
+
+
+def main():
+ argument_spec = dict(
+ result=dict(type="dict", required=True, aliases=["results"]),
+ wait_timeout=dict(type="int", default=300),
+ region=dict(type="str"), # ignored
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ result_in = module.params["result"]
+ wait_timeout_in = module.params.get("wait_timeout")
+
+ changed_results = []
+ try:
+ for id, result in detect_task_results(result_in):
+ if result.get("wait_id"):
+ changed_results.append((id, result["wait_id"]))
+ except ValueError as exc:
+ module.fail_json(
+ msg=f"The value passed as result does not seem to be a registered route53 result: {to_native(exc)}"
+ )
+
+ # connect to the route53 endpoint
+ try:
+ route53 = module.client("route53")
+ except botocore.exceptions.HTTPClientError as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+ for what, wait_id in changed_results:
+ try:
+ waiter = get_waiter(route53, "resource_record_sets_changed")
+ waiter.wait(
+ Id=wait_id,
+ WaiterConfig=dict(
+ Delay=WAIT_RETRY,
+ MaxAttempts=wait_timeout_in // WAIT_RETRY,
+ ),
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg=f"Timeout waiting for resource records changes{what} to be applied")
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to update records")
+ except Exception as e:
+ module.fail_json(msg=f"Unhandled exception. ({to_native(e)})")
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
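
Outside of Ansible, the waiter call in main() above corresponds to roughly this standalone boto3 sketch (assumptions: configured AWS credentials; the change ID is a placeholder for the wait_id value returned by amazon.aws.route53):

    import boto3

    WAIT_RETRY = 5       # seconds between propagation status polls
    WAIT_TIMEOUT = 300   # overall timeout in seconds
    CHANGE_ID = "C2682N5HXP0BZ4"  # placeholder: the wait_id from amazon.aws.route53

    route53 = boto3.client("route53")
    waiter = route53.get_waiter("resource_record_sets_changed")
    # Poll every WAIT_RETRY seconds until the change is INSYNC or the timeout elapses
    waiter.wait(
        Id=CHANGE_ID,
        WaiterConfig={"Delay": WAIT_RETRY, "MaxAttempts": WAIT_TIMEOUT // WAIT_RETRY},
    )
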
diff --git a/ansible_collections/community/aws/plugins/modules/s3_bucket_info.py b/ansible_collections/community/aws/plugins/modules/s3_bucket_info.py
deleted file mode 100644
index 541a02b0f..000000000
--- a/ansible_collections/community/aws/plugins/modules/s3_bucket_info.py
+++ /dev/null
@@ -1,620 +0,0 @@
-#!/usr/bin/python
-"""
-Copyright (c) 2017 Ansible Project
-GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: s3_bucket_info
-version_added: 1.0.0
-author:
- - "Gerben Geijteman (@hyperized)"
-short_description: Lists S3 buckets in AWS
-description:
- - Lists S3 buckets and details about those buckets.
- - Prior to release 5.0.0 this module was called C(community.aws.aws_s3_bucket_info).
- The usage did not change.
-options:
- name:
- description:
- - Name of bucket to query.
- type: str
- default: ""
- version_added: 1.4.0
- name_filter:
- description:
-      - Limits buckets to only buckets whose name contains the string in I(name_filter).
- type: str
- default: ""
- version_added: 1.4.0
- bucket_facts:
- description:
- - Retrieve requested S3 bucket detailed information.
-      - Each bucket_X option executes one API call, so setting many options to C(true) will slow module execution.
- - You can limit buckets by using the I(name) or I(name_filter) option.
- suboptions:
- bucket_accelerate_configuration:
-        description: Retrieve S3 accelerate configuration.
- type: bool
- default: False
- bucket_location:
-        description: Retrieve S3 bucket location.
- type: bool
- default: False
- bucket_replication:
-        description: Retrieve S3 bucket replication.
- type: bool
- default: False
- bucket_acl:
-        description: Retrieve S3 bucket ACLs.
- type: bool
- default: False
- bucket_logging:
-        description: Retrieve S3 bucket logging.
- type: bool
- default: False
- bucket_request_payment:
-        description: Retrieve S3 bucket request payment.
- type: bool
- default: False
- bucket_tagging:
-        description: Retrieve S3 bucket tagging.
- type: bool
- default: False
- bucket_cors:
-        description: Retrieve S3 bucket CORS configuration.
- type: bool
- default: False
- bucket_notification_configuration:
-        description: Retrieve S3 bucket notification configuration.
- type: bool
- default: False
- bucket_encryption:
-        description: Retrieve S3 bucket encryption.
- type: bool
- default: False
- bucket_ownership_controls:
- description:
-          - Retrieve S3 ownership controls.
- type: bool
- default: False
- bucket_website:
-        description: Retrieve S3 bucket website.
- type: bool
- default: False
- bucket_policy:
-        description: Retrieve S3 bucket policy.
- type: bool
- default: False
- bucket_policy_status:
-        description: Retrieve S3 bucket policy status.
- type: bool
- default: False
- bucket_lifecycle_configuration:
-        description: Retrieve S3 bucket lifecycle configuration.
- type: bool
- default: False
- public_access_block:
-        description: Retrieve S3 bucket public access block.
- type: bool
- default: False
- type: dict
- version_added: 1.4.0
- transform_location:
- description:
-      - The S3 bucket location for the default region us-east-1 is normally reported as C(null).
- - Setting this option to C(true) will return C(us-east-1) instead.
- - Affects only queries with I(bucket_facts=true) and I(bucket_location=true).
- type: bool
- default: False
- version_added: 1.4.0
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Note: Only AWS S3 is currently supported
-
-# Lists all S3 buckets
-- community.aws.s3_bucket_info:
- register: result
-
-# Retrieve detailed bucket information
-- community.aws.s3_bucket_info:
- # Show only buckets with name matching
- name_filter: your.testing
- # Choose facts to retrieve
- bucket_facts:
- # bucket_accelerate_configuration: true
- bucket_acl: true
- bucket_cors: true
- bucket_encryption: true
- # bucket_lifecycle_configuration: true
- bucket_location: true
- # bucket_logging: true
- # bucket_notification_configuration: true
- # bucket_ownership_controls: true
- # bucket_policy: true
- # bucket_policy_status: true
- # bucket_replication: true
- # bucket_request_payment: true
- # bucket_tagging: true
- # bucket_website: true
- # public_access_block: true
- transform_location: true
- register: result
-
-# Print out result
-- name: List buckets
- ansible.builtin.debug:
- msg: "{{ result['buckets'] }}"
-'''
-
-RETURN = '''
-bucket_list:
- description: "List of buckets"
- returned: always
- type: complex
- contains:
- name:
- description: Bucket name.
- returned: always
- type: str
- sample: a-testing-bucket-name
- creation_date:
- description: Bucket creation date timestamp.
- returned: always
- type: str
- sample: "2021-01-21T12:44:10+00:00"
- public_access_block:
- description: Bucket public access block configuration.
- returned: when I(bucket_facts=true) and I(public_access_block=true)
- type: complex
- contains:
- PublicAccessBlockConfiguration:
- description: PublicAccessBlockConfiguration data.
- returned: when PublicAccessBlockConfiguration is defined for the bucket
- type: complex
- contains:
- BlockPublicAcls:
- description: BlockPublicAcls setting value.
- type: bool
- sample: true
- BlockPublicPolicy:
- description: BlockPublicPolicy setting value.
- type: bool
- sample: true
- IgnorePublicAcls:
- description: IgnorePublicAcls setting value.
- type: bool
- sample: true
- RestrictPublicBuckets:
- description: RestrictPublicBuckets setting value.
- type: bool
- sample: true
- bucket_name_filter:
- description: String used to limit buckets. See I(name_filter).
- returned: when I(name_filter) is defined
- type: str
- sample: filter-by-this-string
- bucket_acl:
- description: Bucket ACL configuration.
- returned: when I(bucket_facts=true) and I(bucket_acl=true)
- type: complex
- contains:
- Grants:
- description: List of ACL grants.
- type: list
- sample: []
- Owner:
- description: Bucket owner information.
- type: complex
- contains:
- DisplayName:
- description: Bucket owner user display name.
- returned: always
- type: str
- sample: username
- ID:
- description: Bucket owner user ID.
- returned: always
- type: str
- sample: 123894e509349etc
- bucket_cors:
- description: Bucket CORS configuration.
- returned: when I(bucket_facts=true) and I(bucket_cors=true)
- type: complex
- contains:
- CORSRules:
- description: Bucket CORS configuration.
- returned: when CORS rules are defined for the bucket
- type: list
- sample: []
- bucket_encryption:
- description: Bucket encryption configuration.
- returned: when I(bucket_facts=true) and I(bucket_encryption=true)
- type: complex
- contains:
- ServerSideEncryptionConfiguration:
- description: ServerSideEncryptionConfiguration configuration.
- returned: when encryption is enabled on the bucket
- type: complex
- contains:
- Rules:
-          description: List of applied encryption rules.
- returned: when encryption is enabled on the bucket
- type: list
- sample: { "ApplyServerSideEncryptionByDefault": { "SSEAlgorithm": "AES256" }, "BucketKeyEnabled": False }
- bucket_lifecycle_configuration:
- description: Bucket lifecycle configuration settings.
- returned: when I(bucket_facts=true) and I(bucket_lifecycle_configuration=true)
- type: complex
- contains:
- Rules:
- description: List of lifecycle management rules.
- returned: when lifecycle configuration is present
- type: list
- sample: [{ "Status": "Enabled", "ID": "example-rule" }]
- bucket_location:
- description: Bucket location.
- returned: when I(bucket_facts=true) and I(bucket_location=true)
- type: complex
- contains:
- LocationConstraint:
- description: AWS region.
- returned: always
- type: str
- sample: us-east-2
- bucket_logging:
- description: Server access logging configuration.
- returned: when I(bucket_facts=true) and I(bucket_logging=true)
- type: complex
- contains:
- LoggingEnabled:
- description: Server access logging configuration.
- returned: when server access logging is defined for the bucket
- type: complex
- contains:
- TargetBucket:
- description: Target bucket name.
- returned: always
- type: str
- sample: logging-bucket-name
- TargetPrefix:
- description: Prefix in target bucket.
- returned: always
- type: str
- sample: ""
- bucket_notification_configuration:
- description: Bucket notification settings.
- returned: when I(bucket_facts=true) and I(bucket_notification_configuration=true)
- type: complex
- contains:
- TopicConfigurations:
- description: List of notification events configurations.
- returned: when at least one notification is configured
- type: list
- sample: []
- bucket_ownership_controls:
-    description: Preferred object ownership settings.
- returned: when I(bucket_facts=true) and I(bucket_ownership_controls=true)
- type: complex
- contains:
- OwnershipControls:
- description: Object ownership settings.
- returned: when ownership controls are defined for the bucket
- type: complex
- contains:
- Rules:
- description: List of ownership rules.
- returned: when ownership rule is defined
- type: list
-          sample: [{ "ObjectOwnership": "ObjectWriter" }]
- bucket_policy:
- description: Bucket policy contents.
- returned: when I(bucket_facts=true) and I(bucket_policy=true)
- type: str
- sample: '{"Version":"2012-10-17","Statement":[{"Sid":"AddCannedAcl","Effect":"Allow",..}}]}'
- bucket_policy_status:
- description: Status of bucket policy.
- returned: when I(bucket_facts=true) and I(bucket_policy_status=true)
- type: complex
- contains:
- PolicyStatus:
- description: Status of bucket policy.
- returned: when bucket policy is present
- type: complex
- contains:
- IsPublic:
- description: Report bucket policy public status.
- returned: when bucket policy is present
- type: bool
- sample: True
- bucket_replication:
- description: Replication configuration settings.
- returned: when I(bucket_facts=true) and I(bucket_replication=true)
- type: complex
- contains:
- Role:
- description: IAM role used for replication.
- returned: when replication rule is defined
- type: str
- sample: "arn:aws:iam::123:role/example-role"
- Rules:
- description: List of replication rules.
- returned: when replication rule is defined
- type: list
- sample: [{ "ID": "rule-1", "Filter": "{}" }]
- bucket_request_payment:
- description: Requester pays setting.
- returned: when I(bucket_facts=true) and I(bucket_request_payment=true)
- type: complex
- contains:
- Payer:
- description: Current payer.
- returned: always
- type: str
- sample: BucketOwner
- bucket_tagging:
- description: Bucket tags.
- returned: when I(bucket_facts=true) and I(bucket_tagging=true)
- type: dict
- sample: { "Tag1": "Value1", "Tag2": "Value2" }
- bucket_website:
- description: Static website hosting.
- returned: when I(bucket_facts=true) and I(bucket_website=true)
- type: complex
- contains:
- ErrorDocument:
- description: Object serving as HTTP error page.
- returned: when static website hosting is enabled
- type: dict
- sample: { "Key": "error.html" }
- IndexDocument:
- description: Object serving as HTTP index page.
- returned: when static website hosting is enabled
- type: dict
-        sample: { "Suffix": "index.html" }
- RedirectAllRequestsTo:
-        description: Website redirect settings.
- returned: when redirect requests is configured
- type: complex
- contains:
- HostName:
- description: Hostname to redirect.
- returned: always
- type: str
- sample: www.example.com
- Protocol:
- description: Protocol used for redirect.
- returned: always
- type: str
- sample: https
-'''
-
-try:
- import botocore
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-
-
-def get_bucket_list(module, connection, name="", name_filter=""):
- """
-    Return the result of list_buckets, JSON encoded.
-    Filter to only buckets matching 'name' or 'name_filter', if defined.
- :param module:
- :param connection:
- :return:
- """
- buckets = []
- filtered_buckets = []
- final_buckets = []
-
- # Get all buckets
- try:
- buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
- module.fail_json_aws(err_code, msg="Failed to list buckets")
-
- # Filter buckets if requested
- if name_filter:
- for bucket in buckets:
- if name_filter in bucket['name']:
- filtered_buckets.append(bucket)
- elif name:
- for bucket in buckets:
- if name == bucket['name']:
- filtered_buckets.append(bucket)
-
- # Return proper list (filtered or all)
- if name or name_filter:
- final_buckets = filtered_buckets
- else:
- final_buckets = buckets
- return final_buckets
-
-
-def get_buckets_facts(connection, buckets, requested_facts, transform_location):
- """
-    Retrieve additional information about S3 buckets
- """
- full_bucket_list = []
-    # Iterate over all buckets and append retrieved facts to each bucket
- for bucket in buckets:
- bucket.update(get_bucket_details(connection, bucket['name'], requested_facts, transform_location))
- full_bucket_list.append(bucket)
-
- return full_bucket_list
-
-
-def get_bucket_details(connection, name, requested_facts, transform_location):
- """
- Execute all enabled S3API get calls for selected bucket
- """
- all_facts = {}
-
- for key in requested_facts:
- if requested_facts[key]:
- if key == 'bucket_location':
- all_facts[key] = {}
- try:
- all_facts[key] = get_bucket_location(name, connection, transform_location)
-                # we just pass on the error - an error means the resource is undefined
- except botocore.exceptions.ClientError:
- pass
- elif key == 'bucket_tagging':
- all_facts[key] = {}
- try:
- all_facts[key] = get_bucket_tagging(name, connection)
-                # we just pass on the error - an error means the resource is undefined
- except botocore.exceptions.ClientError:
- pass
- else:
- all_facts[key] = {}
- try:
- all_facts[key] = get_bucket_property(name, connection, key)
-                # we just pass on the error - an error means the resource is undefined
- except botocore.exceptions.ClientError:
- pass
-
- return all_facts
-
-
-@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def get_bucket_location(name, connection, transform_location=False):
- """
- Get bucket location and optionally transform 'null' to 'us-east-1'
- """
- data = connection.get_bucket_location(Bucket=name)
-
- # Replace 'null' with 'us-east-1'?
- if transform_location:
- try:
- if not data['LocationConstraint']:
- data['LocationConstraint'] = 'us-east-1'
- except KeyError:
- pass
- # Strip response metadata (not needed)
- data.pop('ResponseMetadata', None)
- return data
-
-
-@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def get_bucket_tagging(name, connection):
- """
- Get bucket tags and transform them using `boto3_tag_list_to_ansible_dict` function
-    Get bucket tags and transform them using the `boto3_tag_list_to_ansible_dict` function
- data = connection.get_bucket_tagging(Bucket=name)
-
- try:
- bucket_tags = boto3_tag_list_to_ansible_dict(data['TagSet'])
- return bucket_tags
- except KeyError:
- # Strip response metadata (not needed)
- data.pop('ResponseMetadata', None)
- return data
-
-
-@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def get_bucket_property(name, connection, get_api_name):
- """
- Get bucket property
- """
- api_call = "get_" + get_api_name
- api_function = getattr(connection, api_call)
- data = api_function(Bucket=name)
-
- # Strip response metadata (not needed)
- data.pop('ResponseMetadata', None)
- return data
-
-
-def main():
- """
- Get list of S3 buckets
- :return:
- """
- argument_spec = dict(
- name=dict(type='str', default=""),
- name_filter=dict(type='str', default=""),
- bucket_facts=dict(type='dict', options=dict(
- bucket_accelerate_configuration=dict(type='bool', default=False),
- bucket_acl=dict(type='bool', default=False),
- bucket_cors=dict(type='bool', default=False),
- bucket_encryption=dict(type='bool', default=False),
- bucket_lifecycle_configuration=dict(type='bool', default=False),
- bucket_location=dict(type='bool', default=False),
- bucket_logging=dict(type='bool', default=False),
- bucket_notification_configuration=dict(type='bool', default=False),
- bucket_ownership_controls=dict(type='bool', default=False),
- bucket_policy=dict(type='bool', default=False),
- bucket_policy_status=dict(type='bool', default=False),
- bucket_replication=dict(type='bool', default=False),
- bucket_request_payment=dict(type='bool', default=False),
- bucket_tagging=dict(type='bool', default=False),
- bucket_website=dict(type='bool', default=False),
- public_access_block=dict(type='bool', default=False),
- )),
- transform_location=dict(type='bool', default=False)
- )
-
- # Ensure we have an empty dict
- result = {}
-
- # Define mutually exclusive options
- mutually_exclusive = [
- ['name', 'name_filter']
- ]
-
- # Including ec2 argument spec
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive)
-
- # Get parameters
- name = module.params.get("name")
- name_filter = module.params.get("name_filter")
- requested_facts = module.params.get("bucket_facts")
-    transform_location = module.params.get("transform_location")
-
- # Set up connection
- connection = {}
- try:
- connection = module.client('s3')
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
- module.fail_json_aws(err_code, msg='Failed to connect to AWS')
-
- # Get basic bucket list (name + creation date)
- bucket_list = get_bucket_list(module, connection, name, name_filter)
-
- # Add information about name/name_filter to result
- if name:
- result['bucket_name'] = name
- elif name_filter:
- result['bucket_name_filter'] = name_filter
-
- # Gather detailed information about buckets if requested
- bucket_facts = module.params.get("bucket_facts")
- if bucket_facts:
- result['buckets'] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location)
- else:
- result['buckets'] = bucket_list
-
- module.exit_json(msg="Retrieved s3 info.", **result)
-
-
-# MAIN
-if __name__ == '__main__':
- main()
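
The transform_location option documented in the removed module corresponds to the following standalone boto3 sketch: S3 reports buckets in the default region with a null LocationConstraint, which the option rewrites to us-east-1 (assumptions: configured AWS credentials; "example-bucket" is a placeholder):

    import boto3

    s3 = boto3.client("s3")

    def get_location(name, transform_location=False):
        data = s3.get_bucket_location(Bucket=name)
        # buckets in us-east-1 report LocationConstraint as None
        if transform_location and not data.get("LocationConstraint"):
            data["LocationConstraint"] = "us-east-1"
        data.pop("ResponseMetadata", None)  # strip response metadata (not needed)
        return data

    print(get_location("example-bucket", transform_location=True))
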
diff --git a/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py b/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py
index 645ca6989..1045164dc 100644
--- a/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py
+++ b/ansible_collections/community/aws/plugins/modules/s3_bucket_notification.py
@@ -1,15 +1,11 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Ansible Project
# (c) 2019, XLAB d.o.o <www.xlab.si>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: s3_bucket_notification
version_added: 1.0.0
@@ -104,12 +100,12 @@ options:
type: str
default: ''
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
---
 # Examples adding notification target configs to an S3 bucket
- name: Setup bucket event notification to a Lambda function
@@ -138,9 +134,9 @@ EXAMPLES = r'''
state: absent
event_name: on_file_add_or_remove
bucket_name: test-bucket
-'''
+"""
-RETURN = r'''
+RETURN = r"""
notification_configuration:
description: dictionary of currently applied notifications
returned: success
@@ -158,51 +154,50 @@ notification_configuration:
description:
- List of current SNS notification configurations applied to the bucket.
type: list
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # will be protected by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
class AmazonBucket:
def __init__(self, module, client):
self.module = module
self.client = client
- self.bucket_name = module.params['bucket_name']
+ self.bucket_name = module.params["bucket_name"]
self.check_mode = module.check_mode
self._full_config_cache = None
def full_config(self):
if self._full_config_cache is None:
self._full_config_cache = dict(
- QueueConfigurations=[],
- TopicConfigurations=[],
- LambdaFunctionConfigurations=[]
+ QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[]
)
try:
- config_lookup = self.client.get_bucket_notification_configuration(
- Bucket=self.bucket_name)
+ config_lookup = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name)
except (ClientError, BotoCoreError) as e:
- self.module.fail_json(msg='{0}'.format(e))
+ self.module.fail_json(msg=f"{e}")
# Handle different event targets
- if config_lookup.get('QueueConfigurations'):
- for queue_config in config_lookup.get('QueueConfigurations'):
- self._full_config_cache['QueueConfigurations'].append(Config.from_api(queue_config))
+ if config_lookup.get("QueueConfigurations"):
+ for queue_config in config_lookup.get("QueueConfigurations"):
+ self._full_config_cache["QueueConfigurations"].append(Config.from_api(queue_config))
- if config_lookup.get('TopicConfigurations'):
- for topic_config in config_lookup.get('TopicConfigurations'):
- self._full_config_cache['TopicConfigurations'].append(Config.from_api(topic_config))
+ if config_lookup.get("TopicConfigurations"):
+ for topic_config in config_lookup.get("TopicConfigurations"):
+ self._full_config_cache["TopicConfigurations"].append(Config.from_api(topic_config))
- if config_lookup.get('LambdaFunctionConfigurations'):
- for function_config in config_lookup.get('LambdaFunctionConfigurations'):
- self._full_config_cache['LambdaFunctionConfigurations'].append(Config.from_api(function_config))
+ if config_lookup.get("LambdaFunctionConfigurations"):
+ for function_config in config_lookup.get("LambdaFunctionConfigurations"):
+ self._full_config_cache["LambdaFunctionConfigurations"].append(Config.from_api(function_config))
return self._full_config_cache
@@ -210,70 +205,59 @@ class AmazonBucket:
# Iterate through configs and get current event config
for target_configs in self.full_config():
for config in self.full_config()[target_configs]:
- if config.raw['Id'] == config_name:
+ if config.raw["Id"] == config_name:
return config
def apply_config(self, desired):
- configs = dict(
- QueueConfigurations=[],
- TopicConfigurations=[],
- LambdaFunctionConfigurations=[]
- )
+ configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[])
# Iterate through existing configs then add the desired config
for target_configs in self.full_config():
for config in self.full_config()[target_configs]:
- if config.name != desired.raw['Id']:
+ if config.name != desired.raw["Id"]:
configs[target_configs].append(config.raw)
- if self.module.params.get('queue_arn'):
- configs['QueueConfigurations'].append(desired.raw)
- if self.module.params.get('topic_arn'):
- configs['TopicConfigurations'].append(desired.raw)
- if self.module.params.get('lambda_function_arn'):
- configs['LambdaFunctionConfigurations'].append(desired.raw)
+ if self.module.params.get("queue_arn"):
+ configs["QueueConfigurations"].append(desired.raw)
+ if self.module.params.get("topic_arn"):
+ configs["TopicConfigurations"].append(desired.raw)
+ if self.module.params.get("lambda_function_arn"):
+ configs["LambdaFunctionConfigurations"].append(desired.raw)
self._upload_bucket_config(configs)
return configs
def delete_config(self, desired):
- configs = dict(
- QueueConfigurations=[],
- TopicConfigurations=[],
- LambdaFunctionConfigurations=[]
- )
+ configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[])
# Iterate through existing configs omitting specified config
for target_configs in self.full_config():
for config in self.full_config()[target_configs]:
- if config.name != desired.raw['Id']:
+ if config.name != desired.raw["Id"]:
configs[target_configs].append(config.raw)
self._upload_bucket_config(configs)
return configs
def _upload_bucket_config(self, configs):
- api_params = dict(
- Bucket=self.bucket_name,
- NotificationConfiguration=dict()
- )
+ api_params = dict(Bucket=self.bucket_name, NotificationConfiguration=dict())
# Iterate through available configs
for target_configs in configs:
if len(configs[target_configs]) > 0:
- api_params['NotificationConfiguration'][target_configs] = configs[target_configs]
+ api_params["NotificationConfiguration"][target_configs] = configs[target_configs]
if not self.check_mode:
try:
self.client.put_bucket_notification_configuration(**api_params)
except (ClientError, BotoCoreError) as e:
- self.module.fail_json(msg='{0}'.format(e))
+ self.module.fail_json(msg=f"{e}")
class Config:
def __init__(self, content):
self._content = content
- self.name = content.get('Id')
+ self.name = content.get("Id")
@property
def raw(self):
@@ -289,41 +273,35 @@ class Config:
"""Generate bucket notification params for target"""
bucket_event_params = dict(
- Id=params['event_name'],
- Events=sorted(params['events']),
+ Id=params["event_name"],
+ Events=sorted(params["events"]),
Filter=dict(
Key=dict(
FilterRules=[
- dict(
- Name='Prefix',
- Value=params['prefix']
- ),
- dict(
- Name='Suffix',
- Value=params['suffix']
- )
+ dict(Name="Prefix", Value=params["prefix"]),
+ dict(Name="Suffix", Value=params["suffix"]),
]
)
- )
+ ),
)
# Handle different event targets
- if params.get('queue_arn'):
- bucket_event_params['QueueArn'] = params['queue_arn']
- if params.get('topic_arn'):
- bucket_event_params['TopicArn'] = params['topic_arn']
- if params.get('lambda_function_arn'):
- function_arn = params['lambda_function_arn']
+ if params.get("queue_arn"):
+ bucket_event_params["QueueArn"] = params["queue_arn"]
+ if params.get("topic_arn"):
+ bucket_event_params["TopicArn"] = params["topic_arn"]
+ if params.get("lambda_function_arn"):
+ function_arn = params["lambda_function_arn"]
qualifier = None
- if params['lambda_version'] > 0:
- qualifier = str(params['lambda_version'])
- elif params['lambda_alias']:
- qualifier = str(params['lambda_alias'])
+ if params["lambda_version"] > 0:
+ qualifier = str(params["lambda_version"])
+ elif params["lambda_alias"]:
+ qualifier = str(params["lambda_alias"])
if qualifier:
- params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
+ params["lambda_function_arn"] = f"{function_arn}:{qualifier}"
- bucket_event_params['LambdaFunctionArn'] = params['lambda_function_arn']
+ bucket_event_params["LambdaFunctionArn"] = params["lambda_function_arn"]
return cls(bucket_event_params)
@@ -333,66 +311,70 @@ class Config:
def setup_module_object():
- event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
- 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
- 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
- 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
- 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
+ event_types = [
+ "s3:ObjectCreated:*",
+ "s3:ObjectCreated:Put",
+ "s3:ObjectCreated:Post",
+ "s3:ObjectCreated:Copy",
+ "s3:ObjectCreated:CompleteMultipartUpload",
+ "s3:ObjectRemoved:*",
+ "s3:ObjectRemoved:Delete",
+ "s3:ObjectRemoved:DeleteMarkerCreated",
+ "s3:ObjectRestore:Post",
+ "s3:ObjectRestore:Completed",
+ "s3:ReducedRedundancyLostObject",
+ ]
argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
+ state=dict(default="present", choices=["present", "absent"]),
event_name=dict(required=True),
- lambda_function_arn=dict(aliases=['function_arn']),
- queue_arn=dict(type='str'),
- topic_arn=dict(type='str'),
+ lambda_function_arn=dict(aliases=["function_arn"]),
+ queue_arn=dict(type="str"),
+ topic_arn=dict(type="str"),
bucket_name=dict(required=True),
- events=dict(type='list', default=[], choices=event_types, elements='str'),
- prefix=dict(default=''),
- suffix=dict(default=''),
+ events=dict(type="list", default=[], choices=event_types, elements="str"),
+ prefix=dict(default=""),
+ suffix=dict(default=""),
lambda_alias=dict(),
- lambda_version=dict(type='int', default=0),
+ lambda_version=dict(type="int", default=0),
)
mutually_exclusive = [
- ['queue_arn', 'topic_arn', 'lambda_function_arn'],
- ['lambda_alias', 'lambda_version']
+ ["queue_arn", "topic_arn", "lambda_function_arn"],
+ ["lambda_alias", "lambda_version"],
]
return AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
- required_if=[['state', 'present', ['events']]]
+ required_if=[["state", "present", ["events"]]],
)
def main():
module = setup_module_object()
- client = module.client('s3')
+ client = module.client("s3")
bucket = AmazonBucket(module, client)
- current = bucket.current_config(module.params['event_name'])
+ current = bucket.current_config(module.params["event_name"])
desired = Config.from_params(**module.params)
- notification_configs = dict(
- QueueConfigurations=[],
- TopicConfigurations=[],
- LambdaFunctionConfigurations=[]
- )
+ notification_configs = dict(QueueConfigurations=[], TopicConfigurations=[], LambdaFunctionConfigurations=[])
for target_configs in bucket.full_config():
for cfg in bucket.full_config()[target_configs]:
notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg.raw))
- state = module.params['state']
+ state = module.params["state"]
updated_configuration = dict()
changed = False
- if state == 'present':
+ if state == "present":
if current != desired:
updated_configuration = bucket.apply_config(desired)
changed = True
- elif state == 'absent':
+ elif state == "absent":
if current:
updated_configuration = bucket.delete_config(desired)
changed = True
@@ -402,9 +384,8 @@ def main():
for cfg in updated_configuration.get(target_configs, list()):
notification_configs[target_configs].append(camel_dict_to_snake_dict(cfg))
- module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict(
- notification_configs))
+ module.exit_json(changed=changed, notification_configuration=camel_dict_to_snake_dict(notification_configs))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
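
For context on _upload_bucket_config above: the S3 notification configuration is replaced wholesale, so every call must carry the complete desired state, and any target type omitted from the payload is dropped. A minimal standalone boto3 sketch (assumptions: configured AWS credentials; the bucket name, Id, and Lambda ARN are placeholders):

    import boto3

    s3 = boto3.client("s3")
    s3.put_bucket_notification_configuration(
        Bucket="example-bucket",
        NotificationConfiguration={
            "LambdaFunctionConfigurations": [
                {
                    "Id": "on_file_add_or_remove",
                    "LambdaFunctionArn": "arn:aws:lambda:us-east-1:123456789012:function:example",
                    "Events": ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
                    "Filter": {
                        "Key": {
                            "FilterRules": [
                                {"Name": "Prefix", "Value": "images/"},
                                {"Name": "Suffix", "Value": ".jpg"},
                            ]
                        }
                    },
                }
            ]
        },
    )
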
diff --git a/ansible_collections/community/aws/plugins/modules/s3_cors.py b/ansible_collections/community/aws/plugins/modules/s3_cors.py
index 753e395f9..d153c7df8 100644
--- a/ansible_collections/community/aws/plugins/modules/s3_cors.py
+++ b/ansible_collections/community/aws/plugins/modules/s3_cors.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: s3_cors
version_added: 1.0.0
@@ -36,12 +33,12 @@ options:
choices: [ 'present', 'absent' ]
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
 # Create a simple CORS configuration for an S3 bucket
@@ -65,9 +62,9 @@ EXAMPLES = r'''
- community.aws.s3_cors:
name: mys3bucket
state: absent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
changed:
description: check to see if a change was made to the rules
returned: always
@@ -96,25 +93,28 @@ rules:
"max_age_seconds": 30000
}
]
-'''
+"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
-def create_or_update_bucket_cors(connection, module):
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
+def create_or_update_bucket_cors(connection, module):
name = module.params.get("name")
rules = module.params.get("rules", [])
changed = False
try:
- current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules']
+ current_camel_rules = connection.get_bucket_cors(Bucket=name)["CORSRules"]
except ClientError:
current_camel_rules = []
@@ -125,15 +125,14 @@ def create_or_update_bucket_cors(connection, module):
if changed:
try:
- cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules})
+ cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={"CORSRules": new_camel_rules})
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name))
+ module.fail_json_aws(e, msg=f"Unable to update CORS for bucket {name}")
module.exit_json(changed=changed, name=name, rules=rules)
def destroy_bucket_cors(connection, module):
-
name = module.params.get("name")
changed = False
@@ -141,30 +140,29 @@ def destroy_bucket_cors(connection, module):
cors = connection.delete_bucket_cors(Bucket=name)
changed = True
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to delete CORS for bucket {0}".format(name))
+ module.fail_json_aws(e, msg=f"Unable to delete CORS for bucket {name}")
module.exit_json(changed=changed)
def main():
-
argument_spec = dict(
- name=dict(required=True, type='str'),
- rules=dict(type='list', elements='dict'),
- state=dict(type='str', choices=['present', 'absent'], required=True)
+ name=dict(required=True, type="str"),
+ rules=dict(type="list", elements="dict"),
+ state=dict(type="str", choices=["present", "absent"], required=True),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
- client = module.client('s3')
+ client = module.client("s3")
state = module.params.get("state")
- if state == 'present':
+ if state == "present":
create_or_update_bucket_cors(client, module)
- elif state == 'absent':
+ elif state == "absent":
destroy_bucket_cors(client, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
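
The create-or-update path in s3_cors above reduces to: fetch the current rules (treating a missing configuration as an empty list), compare against the desired rules, and PUT the replacement set only when they differ. A minimal boto3 sketch of the same flow (assumptions: configured AWS credentials; the bucket name and rules are placeholders; the module compares via compare_policies rather than the plain equality used here):

    import boto3
    import botocore

    s3 = boto3.client("s3")
    bucket = "example-bucket"
    desired_rules = [
        {"AllowedMethods": ["GET"], "AllowedOrigins": ["*"], "MaxAgeSeconds": 30000},
    ]

    try:
        current_rules = s3.get_bucket_cors(Bucket=bucket)["CORSRules"]
    except botocore.exceptions.ClientError:
        current_rules = []  # no CORS configuration on the bucket yet

    if current_rules != desired_rules:
        s3.put_bucket_cors(Bucket=bucket, CORSConfiguration={"CORSRules": desired_rules})
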
diff --git a/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py b/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
index 660bca869..2f48e06d4 100644
--- a/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
+++ b/ansible_collections/community/aws/plugins/modules/s3_lifecycle.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: s3_lifecycle
version_added: 1.0.0
short_description: Manage S3 bucket lifecycle rules in AWS
description:
- - Manage S3 bucket lifecycle rules in AWS.
-author: "Rob White (@wimnat)"
+ - Manage S3 bucket lifecycle rules in AWS.
+author:
+ - "Rob White (@wimnat)"
notes:
- If specifying expiration time as days then transition time must also be specified in days.
- If specifying expiration time as a date then transition time must also be specified as a date.
@@ -69,7 +68,6 @@ options:
noncurrent_version_keep_newer:
description:
- The minimum number of non-current versions to retain.
- - Requires C(botocore >= 1.23.12)
      - Requires I(noncurrent_version_expiration_days).
required: false
type: int
@@ -149,13 +147,14 @@ options:
type: bool
default: false
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
+RETURN = r""" # """
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
@@ -219,14 +218,15 @@ EXAMPLES = r'''
storage_class: standard_ia
- transition_days: 90
storage_class: glacier
-'''
+"""
-from copy import deepcopy
import datetime
import time
+from copy import deepcopy
try:
from dateutil import parser as date_parser
+
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
@@ -236,11 +236,12 @@ try:
except ImportError:
pass # handled by AnsibleAwsModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
-from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def parse_date(date):
@@ -260,10 +261,13 @@ def fetch_rules(client, module, name):
# Get the bucket's current lifecycle rules
try:
current_lifecycle = client.get_bucket_lifecycle_configuration(aws_retry=True, Bucket=name)
- current_lifecycle_rules = normalize_boto3_result(current_lifecycle['Rules'])
- except is_boto3_error_code('NoSuchLifecycleConfiguration'):
+ current_lifecycle_rules = normalize_boto3_result(current_lifecycle["Rules"])
+ except is_boto3_error_code("NoSuchLifecycleConfiguration"):
current_lifecycle_rules = []
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
return current_lifecycle_rules
@@ -290,35 +294,37 @@ def build_rule(client, module):
rule = dict(Filter=dict(Prefix=prefix), Status=status.title())
if rule_id is not None:
- rule['ID'] = rule_id
+ rule["ID"] = rule_id
if abort_incomplete_multipart_upload_days:
- rule['AbortIncompleteMultipartUpload'] = {
- 'DaysAfterInitiation': abort_incomplete_multipart_upload_days
- }
+ rule["AbortIncompleteMultipartUpload"] = {"DaysAfterInitiation": abort_incomplete_multipart_upload_days}
# Create expiration
if expiration_days is not None:
- rule['Expiration'] = dict(Days=expiration_days)
+ rule["Expiration"] = dict(Days=expiration_days)
elif expiration_date is not None:
- rule['Expiration'] = dict(Date=expiration_date.isoformat())
+ rule["Expiration"] = dict(Date=expiration_date.isoformat())
elif expire_object_delete_marker is not None:
- rule['Expiration'] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker)
+ rule["Expiration"] = dict(ExpiredObjectDeleteMarker=expire_object_delete_marker)
if noncurrent_version_expiration_days or noncurrent_version_keep_newer:
- rule['NoncurrentVersionExpiration'] = dict()
+ rule["NoncurrentVersionExpiration"] = dict()
if noncurrent_version_expiration_days is not None:
- rule['NoncurrentVersionExpiration']['NoncurrentDays'] = noncurrent_version_expiration_days
+ rule["NoncurrentVersionExpiration"]["NoncurrentDays"] = noncurrent_version_expiration_days
if noncurrent_version_keep_newer is not None:
- rule['NoncurrentVersionExpiration']['NewerNoncurrentVersions'] = noncurrent_version_keep_newer
+ rule["NoncurrentVersionExpiration"]["NewerNoncurrentVersions"] = noncurrent_version_keep_newer
if transition_days is not None:
- rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ]
+ rule["Transitions"] = [
+ dict(Days=transition_days, StorageClass=storage_class.upper()),
+ ]
elif transition_date is not None:
- rule['Transitions'] = [dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()), ]
+ rule["Transitions"] = [
+ dict(Date=transition_date.isoformat(), StorageClass=storage_class.upper()),
+ ]
if transitions is not None:
- if not rule.get('Transitions'):
- rule['Transitions'] = []
+ if not rule.get("Transitions"):
+ rule["Transitions"] = []
for transition in transitions:
t_out = dict()
if transition.get("transition_date"):
@@ -330,18 +336,21 @@ def build_rule(client, module):
rule["Transitions"].append(t_out)
if noncurrent_version_transition_days is not None:
- rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days,
- StorageClass=noncurrent_version_storage_class.upper()), ]
+ rule["NoncurrentVersionTransitions"] = [
+ dict(
+ NoncurrentDays=noncurrent_version_transition_days, StorageClass=noncurrent_version_storage_class.upper()
+ ),
+ ]
if noncurrent_version_transitions is not None:
- if not rule.get('NoncurrentVersionTransitions'):
- rule['NoncurrentVersionTransitions'] = []
+ if not rule.get("NoncurrentVersionTransitions"):
+ rule["NoncurrentVersionTransitions"] = []
for noncurrent_version_transition in noncurrent_version_transitions:
t_out = dict()
- t_out['NoncurrentDays'] = noncurrent_version_transition['transition_days']
- if noncurrent_version_transition.get('storage_class'):
- t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper()
- rule['NoncurrentVersionTransitions'].append(t_out)
+ t_out["NoncurrentDays"] = noncurrent_version_transition["transition_days"]
+ if noncurrent_version_transition.get("storage_class"):
+ t_out["StorageClass"] = noncurrent_version_transition["storage_class"].upper()
+ rule["NoncurrentVersionTransitions"].append(t_out)
return rule
@@ -358,23 +367,29 @@ def compare_and_update_configuration(client, module, current_lifecycle_rules, ru
if current_lifecycle_rules:
# If rule ID exists, use that for comparison otherwise compare based on prefix
for existing_rule in current_lifecycle_rules:
- if rule.get('ID') == existing_rule.get('ID') and rule['Filter'].get('Prefix', '') != existing_rule.get('Filter', {}).get('Prefix', ''):
- existing_rule.pop('ID')
- elif rule_id is None and rule['Filter'].get('Prefix', '') == existing_rule.get('Filter', {}).get('Prefix', ''):
- existing_rule.pop('ID')
- if rule.get('ID') == existing_rule.get('ID'):
- changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration)
+ if rule.get("ID") == existing_rule.get("ID") and rule["Filter"].get("Prefix", "") != existing_rule.get(
+ "Filter", {}
+ ).get("Prefix", ""):
+ existing_rule.pop("ID")
+ elif rule_id is None and rule["Filter"].get("Prefix", "") == existing_rule.get("Filter", {}).get(
+ "Prefix", ""
+ ):
+ existing_rule.pop("ID")
+ if rule.get("ID") == existing_rule.get("ID"):
+ changed_, appended_ = update_or_append_rule(
+ rule, existing_rule, purge_transitions, lifecycle_configuration
+ )
changed = changed_ or changed
appended = appended_ or appended
else:
- lifecycle_configuration['Rules'].append(existing_rule)
+ lifecycle_configuration["Rules"].append(existing_rule)
# If nothing appended then append now as the rule must not exist
if not appended:
- lifecycle_configuration['Rules'].append(rule)
+ lifecycle_configuration["Rules"].append(rule)
changed = True
else:
- lifecycle_configuration['Rules'].append(rule)
+ lifecycle_configuration["Rules"].append(rule)
changed = True
return changed, lifecycle_configuration
@@ -382,24 +397,24 @@ def compare_and_update_configuration(client, module, current_lifecycle_rules, ru
def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj):
changed = False
- if existing_rule['Status'] != new_rule['Status']:
- if not new_rule.get('Transitions') and existing_rule.get('Transitions'):
- new_rule['Transitions'] = existing_rule['Transitions']
- if not new_rule.get('Expiration') and existing_rule.get('Expiration'):
- new_rule['Expiration'] = existing_rule['Expiration']
- if not new_rule.get('NoncurrentVersionExpiration') and existing_rule.get('NoncurrentVersionExpiration'):
- new_rule['NoncurrentVersionExpiration'] = existing_rule['NoncurrentVersionExpiration']
- lifecycle_obj['Rules'].append(new_rule)
+ if existing_rule["Status"] != new_rule["Status"]:
+ if not new_rule.get("Transitions") and existing_rule.get("Transitions"):
+ new_rule["Transitions"] = existing_rule["Transitions"]
+ if not new_rule.get("Expiration") and existing_rule.get("Expiration"):
+ new_rule["Expiration"] = existing_rule["Expiration"]
+ if not new_rule.get("NoncurrentVersionExpiration") and existing_rule.get("NoncurrentVersionExpiration"):
+ new_rule["NoncurrentVersionExpiration"] = existing_rule["NoncurrentVersionExpiration"]
+ lifecycle_obj["Rules"].append(new_rule)
changed = True
appended = True
else:
if not purge_transitions:
merge_transitions(new_rule, existing_rule)
if compare_rule(new_rule, existing_rule, purge_transitions):
- lifecycle_obj['Rules'].append(new_rule)
+ lifecycle_obj["Rules"].append(new_rule)
appended = True
else:
- lifecycle_obj['Rules'].append(new_rule)
+ lifecycle_obj["Rules"].append(new_rule)
changed = True
appended = True
return changed, appended
@@ -413,24 +428,23 @@ def compare_and_remove_rule(current_lifecycle_rules, rule_id=None, prefix=None):
# If an ID exists, use that otherwise compare based on prefix
if rule_id is not None:
for existing_rule in current_lifecycle_rules:
- if rule_id == existing_rule['ID']:
+ if rule_id == existing_rule["ID"]:
# We're not keeping the rule (i.e. deleting) so mark as changed
changed = True
else:
- lifecycle_configuration['Rules'].append(existing_rule)
+ lifecycle_configuration["Rules"].append(existing_rule)
else:
for existing_rule in current_lifecycle_rules:
- if prefix == existing_rule['Filter'].get('Prefix', ''):
+ if prefix == existing_rule["Filter"].get("Prefix", ""):
# We're not keeping the rule (i.e. deleting) so mark as changed
changed = True
else:
- lifecycle_configuration['Rules'].append(existing_rule)
+ lifecycle_configuration["Rules"].append(existing_rule)
return changed, lifecycle_configuration
def compare_rule(new_rule, old_rule, purge_transitions):
-
# Copy objects
rule1 = deepcopy(new_rule)
rule2 = deepcopy(old_rule)
@@ -438,10 +452,10 @@ def compare_rule(new_rule, old_rule, purge_transitions):
if purge_transitions:
return rule1 == rule2
else:
- transitions1 = rule1.pop('Transitions', [])
- transitions2 = rule2.pop('Transitions', [])
- noncurrent_transtions1 = rule1.pop('NoncurrentVersionTransitions', [])
- noncurrent_transtions2 = rule2.pop('NoncurrentVersionTransitions', [])
+ transitions1 = rule1.pop("Transitions", [])
+ transitions2 = rule2.pop("Transitions", [])
+ noncurrent_transtions1 = rule1.pop("NoncurrentVersionTransitions", [])
+ noncurrent_transtions2 = rule2.pop("NoncurrentVersionTransitions", [])
if rule1 != rule2:
return False
for transition in transitions1:
@@ -459,39 +473,39 @@ def merge_transitions(updated_rule, updating_rule):
# in updating_rule to updated_rule
updated_transitions = {}
updating_transitions = {}
- for transition in updated_rule.get('Transitions', []):
- updated_transitions[transition['StorageClass']] = transition
- for transition in updating_rule.get('Transitions', []):
- updating_transitions[transition['StorageClass']] = transition
+ for transition in updated_rule.get("Transitions", []):
+ updated_transitions[transition["StorageClass"]] = transition
+ for transition in updating_rule.get("Transitions", []):
+ updating_transitions[transition["StorageClass"]] = transition
for storage_class, transition in updating_transitions.items():
if updated_transitions.get(storage_class) is None:
- updated_rule['Transitions'].append(transition)
+ updated_rule["Transitions"].append(transition)
def create_lifecycle_rule(client, module):
-
name = module.params.get("name")
wait = module.params.get("wait")
changed = False
old_lifecycle_rules = fetch_rules(client, module, name)
new_rule = build_rule(client, module)
- (changed, lifecycle_configuration) = compare_and_update_configuration(client, module,
- old_lifecycle_rules,
- new_rule)
+ (changed, lifecycle_configuration) = compare_and_update_configuration(client, module, old_lifecycle_rules, new_rule)
if changed:
# Write lifecycle to bucket
try:
client.put_bucket_lifecycle_configuration(
- aws_retry=True,
- Bucket=name,
- LifecycleConfiguration=lifecycle_configuration,
+ aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_configuration
)
except is_boto3_error_message("At least one action needs to be specified in a rule"):
# Amazon interpreted this as not changing anything
changed = False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules)
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(
+ e, lifecycle_configuration=lifecycle_configuration, name=name, old_lifecycle_rules=old_lifecycle_rules
+ )
_changed = changed
_retries = 10
@@ -504,9 +518,7 @@ def create_lifecycle_rule(client, module):
time.sleep(5)
_retries -= 1
new_rules = fetch_rules(client, module, name)
- (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module,
- new_rules,
- new_rule)
+ (_changed, lifecycle_configuration) = compare_and_update_configuration(client, module, new_rules, new_rule)
if not _changed:
_not_changed_cnt -= 1
_changed = True
@@ -517,13 +529,17 @@ def create_lifecycle_rule(client, module):
new_rules = fetch_rules(client, module, name)
- module.exit_json(changed=changed, new_rule=new_rule, rules=new_rules,
- old_rules=old_lifecycle_rules, _retries=_retries,
- _config=lifecycle_configuration)
+ module.exit_json(
+ changed=changed,
+ new_rule=new_rule,
+ rules=new_rules,
+ old_rules=old_lifecycle_rules,
+ _retries=_retries,
+ _config=lifecycle_configuration,
+ )
def destroy_lifecycle_rule(client, module):
-
name = module.params.get("name")
prefix = module.params.get("prefix")
rule_id = module.params.get("rule_id")
@@ -539,11 +555,10 @@ def destroy_lifecycle_rule(client, module):
if changed:
# Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration
try:
- if lifecycle_obj['Rules']:
+ if lifecycle_obj["Rules"]:
client.put_bucket_lifecycle_configuration(
- aws_retry=True,
- Bucket=name,
- LifecycleConfiguration=lifecycle_obj)
+ aws_retry=True, Bucket=name, LifecycleConfiguration=lifecycle_obj
+ )
elif current_lifecycle_rules:
changed = True
client.delete_bucket_lifecycle(aws_retry=True, Bucket=name)
@@ -572,33 +587,32 @@ def destroy_lifecycle_rule(client, module):
new_rules = fetch_rules(client, module, name)
- module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules,
- _retries=_retries)
+ module.exit_json(changed=changed, rules=new_rules, old_rules=current_lifecycle_rules, _retries=_retries)
def main():
- s3_storage_class = ['glacier', 'onezone_ia', 'standard_ia', 'intelligent_tiering', 'deep_archive']
+ s3_storage_class = ["glacier", "onezone_ia", "standard_ia", "intelligent_tiering", "deep_archive"]
argument_spec = dict(
- name=dict(required=True, type='str'),
- abort_incomplete_multipart_upload_days=dict(type='int'),
- expiration_days=dict(type='int'),
+ name=dict(required=True, type="str"),
+ abort_incomplete_multipart_upload_days=dict(type="int"),
+ expiration_days=dict(type="int"),
expiration_date=dict(),
- expire_object_delete_marker=dict(type='bool'),
- noncurrent_version_expiration_days=dict(type='int'),
- noncurrent_version_keep_newer=dict(type='int'),
- noncurrent_version_storage_class=dict(default='glacier', type='str', choices=s3_storage_class),
- noncurrent_version_transition_days=dict(type='int'),
- noncurrent_version_transitions=dict(type='list', elements='dict'),
+ expire_object_delete_marker=dict(type="bool"),
+ noncurrent_version_expiration_days=dict(type="int"),
+ noncurrent_version_keep_newer=dict(type="int"),
+ noncurrent_version_storage_class=dict(default="glacier", type="str", choices=s3_storage_class),
+ noncurrent_version_transition_days=dict(type="int"),
+ noncurrent_version_transitions=dict(type="list", elements="dict"),
prefix=dict(),
rule_id=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- status=dict(default='enabled', choices=['enabled', 'disabled']),
- storage_class=dict(default='glacier', type='str', choices=s3_storage_class),
- transition_days=dict(type='int'),
+ state=dict(default="present", choices=["present", "absent"]),
+ status=dict(default="enabled", choices=["enabled", "disabled"]),
+ storage_class=dict(default="glacier", type="str", choices=s3_storage_class),
+ transition_days=dict(type="int"),
transition_date=dict(),
- transitions=dict(type='list', elements='dict'),
- purge_transitions=dict(default=True, type='bool'),
- wait=dict(type='bool', default=False)
+ transitions=dict(type="list", elements="dict"),
+ purge_transitions=dict(default=True, type="bool"),
+ wait=dict(type="bool", default=False),
)
module = AnsibleAWSModule(
@@ -617,51 +631,54 @@ def main():
},
)
- client = module.client('s3', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("s3", retry_decorator=AWSRetry.jittered_backoff())
expiration_date = module.params.get("expiration_date")
transition_date = module.params.get("transition_date")
state = module.params.get("state")
- if module.params.get("noncurrent_version_keep_newer"):
- module.require_botocore_at_least(
- "1.23.12",
- reason="to set number of versions to keep with noncurrent_version_keep_newer"
+ if state == "present" and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix
+ required_when_present = (
+ "abort_incomplete_multipart_upload_days",
+ "expiration_date",
+ "expiration_days",
+ "expire_object_delete_marker",
+ "transition_date",
+ "transition_days",
+ "transitions",
+ "noncurrent_version_expiration_days",
+ "noncurrent_version_keep_newer",
+ "noncurrent_version_transition_days",
+ "noncurrent_version_transitions",
)
-
- if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix
-
- required_when_present = ('abort_incomplete_multipart_upload_days',
- 'expiration_date', 'expiration_days', 'expire_object_delete_marker',
- 'transition_date', 'transition_days', 'transitions',
- 'noncurrent_version_expiration_days',
- 'noncurrent_version_keep_newer',
- 'noncurrent_version_transition_days',
- 'noncurrent_version_transitions')
for param in required_when_present:
if module.params.get(param) is None:
break
else:
- msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present)
+ msg = f"one of the following is required when 'state' is 'present': {', '.join(required_when_present)}"
module.fail_json(msg=msg)
# If dates have been set, make sure they're in a valid format
if expiration_date:
expiration_date = parse_date(expiration_date)
if expiration_date is None:
- module.fail_json(msg="expiration_date is not a valid ISO-8601 format."
- " The time must be midnight and a timezone of GMT must be included")
+ module.fail_json(
+ msg="expiration_date is not a valid ISO-8601 format."
+ " The time must be midnight and a timezone of GMT must be included"
+ )
if transition_date:
transition_date = parse_date(transition_date)
if transition_date is None:
- module.fail_json(msg="transition_date is not a valid ISO-8601 format."
- " The time must be midnight and a timezone of GMT must be included")
+ module.fail_json(
+ msg="transition_date is not a valid ISO-8601 format."
+ " The time must be midnight and a timezone of GMT must be included"
+ )
- if state == 'present':
+ if state == "present":
create_lifecycle_rule(client, module)
- elif state == 'absent':
+ elif state == "absent":
destroy_lifecycle_rule(client, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
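
The wait handling in create_lifecycle_rule above (wait, _retries, _not_changed_cnt) re-fetches the bucket's rules and re-runs the comparison until the write becomes visible, since S3 configuration reads are eventually consistent. A minimal sketch of that poll-and-compare loop, with fetch_rules and matches_desired standing in for the module's own helpers (the module additionally counts consecutive unchanged reads before trusting the result):

import time

def wait_until_applied(fetch_rules, matches_desired, retries=10, delay=5):
    # Re-read the configuration until it matches what was written,
    # echoing the module's _retries countdown and 5-second sleep.
    while retries > 0:
        if matches_desired(fetch_rules()):
            return True
        time.sleep(delay)
        retries -= 1
    return False
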
diff --git a/ansible_collections/community/aws/plugins/modules/s3_logging.py b/ansible_collections/community/aws/plugins/modules/s3_logging.py
index 011baa951..3a7874994 100644
--- a/ansible_collections/community/aws/plugins/modules/s3_logging.py
+++ b/ansible_collections/community/aws/plugins/modules/s3_logging.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: s3_logging
version_added: 1.0.0
short_description: Manage logging facility of an s3 bucket in AWS
description:
- - Manage logging facility of an s3 bucket in AWS
-author: Rob White (@wimnat)
+ - Manage the logging facility of an s3 bucket in AWS
+author:
+ - Rob White (@wimnat)
options:
name:
description:
@@ -36,13 +35,14 @@ options:
default: ""
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
+RETURN = r""" # """
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
@@ -56,32 +56,31 @@ EXAMPLES = '''
community.aws.s3_logging:
name: mywebsite.com
state: absent
-
-'''
+"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-def compare_bucket_logging(bucket_logging, target_bucket, target_prefix):
- if not bucket_logging.get('LoggingEnabled', False):
+def compare_bucket_logging(bucket_logging, target_bucket, target_prefix):
+ if not bucket_logging.get("LoggingEnabled", False):
if target_bucket:
return True
return False
- logging = bucket_logging['LoggingEnabled']
- if logging['TargetBucket'] != target_bucket:
+ logging = bucket_logging["LoggingEnabled"]
+ if logging["TargetBucket"] != target_bucket:
return True
- if logging['TargetPrefix'] != target_prefix:
+ if logging["TargetPrefix"] != target_prefix:
return True
return False
@@ -89,18 +88,18 @@ def compare_bucket_logging(bucket_logging, target_bucket, target_prefix):
def verify_acls(connection, module, target_bucket):
try:
current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket)
- current_grants = current_acl['Grants']
- except is_boto3_error_code('NoSuchBucket'):
- module.fail_json(msg="Target Bucket '{0}' not found".format(target_bucket))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ current_grants = current_acl["Grants"]
+ except is_boto3_error_code("NoSuchBucket"):
+ module.fail_json(msg=f"Target Bucket '{target_bucket}' not found")
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to fetch target bucket ACL")
required_grant = {
- 'Grantee': {
- 'URI': "http://acs.amazonaws.com/groups/s3/LogDelivery",
- 'Type': 'Group'
- },
- 'Permission': 'FULL_CONTROL'
+ "Grantee": {"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", "Type": "Group"},
+ "Permission": "FULL_CONTROL",
}
for grant in current_grants:
@@ -113,8 +112,8 @@ def verify_acls(connection, module, target_bucket):
updated_acl = dict(current_acl)
updated_grants = list(current_grants)
updated_grants.append(required_grant)
- updated_acl['Grants'] = updated_grants
- del updated_acl['ResponseMetadata']
+ updated_acl["Grants"] = updated_grants
+ del updated_acl["ResponseMetadata"]
try:
connection.put_bucket_acl(aws_retry=True, Bucket=target_bucket, AccessControlPolicy=updated_acl)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
@@ -124,7 +123,6 @@ def verify_acls(connection, module, target_bucket):
def enable_bucket_logging(connection, module):
-
bucket_name = module.params.get("name")
target_bucket = module.params.get("target_bucket")
target_prefix = module.params.get("target_prefix")
@@ -132,9 +130,12 @@ def enable_bucket_logging(connection, module):
try:
bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name)
- except is_boto3_error_code('NoSuchBucket'):
- module.fail_json(msg="Bucket '{0}' not found".format(bucket_name))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
+ except is_boto3_error_code("NoSuchBucket"):
+ module.fail_json(msg=f"Bucket '{bucket_name}' not found")
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to fetch current logging status")
try:
@@ -151,11 +152,12 @@ def enable_bucket_logging(connection, module):
aws_retry=True,
Bucket=bucket_name,
BucketLoggingStatus={
- 'LoggingEnabled': {
- 'TargetBucket': target_bucket,
- 'TargetPrefix': target_prefix,
+ "LoggingEnabled": {
+ "TargetBucket": target_bucket,
+ "TargetPrefix": target_prefix,
}
- })
+ },
+ )
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to enable bucket logging")
@@ -165,7 +167,6 @@ def enable_bucket_logging(connection, module):
def disable_bucket_logging(connection, module):
-
bucket_name = module.params.get("name")
changed = False
@@ -181,11 +182,9 @@ def disable_bucket_logging(connection, module):
module.exit_json(changed=True)
try:
- response = AWSRetry.jittered_backoff(
- catch_extra_error_codes=['InvalidTargetBucketForLogging']
- )(connection.put_bucket_logging)(
- Bucket=bucket_name, BucketLoggingStatus={}
- )
+ response = AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidTargetBucketForLogging"])(
+ connection.put_bucket_logging
+ )(Bucket=bucket_name, BucketLoggingStatus={})
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to disable bucket logging")
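
The inline AWSRetry.jittered_backoff(catch_extra_error_codes=[...])(connection.put_bucket_logging)(...) call above reads oddly but is just a decorator factory applied on the spot: the first call builds a decorator, the second wraps the bound client method, and the third invokes it. A simplified stand-in showing the same shape (TransientError and the timing are illustrative; the real utility in amazon.aws matches AWS error codes):

import random
import time

class TransientError(Exception):
    pass

def retry_with_backoff(retries=3, base=1.0):
    # Decorator factory: retry_with_backoff(...) returns a decorator,
    # so it can be applied inline as retry_with_backoff(...)(fn)(args).
    def decorator(fn):
        def wrapper(*args, **kwargs):
            for attempt in range(retries):
                try:
                    return fn(*args, **kwargs)
                except TransientError:
                    if attempt == retries - 1:
                        raise
                    time.sleep(random.uniform(0, base * 2**attempt))
        return wrapper
    return decorator
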
@@ -193,24 +192,23 @@ def disable_bucket_logging(connection, module):
def main():
-
argument_spec = dict(
name=dict(required=True),
target_bucket=dict(required=False, default=None),
target_prefix=dict(required=False, default=""),
- state=dict(required=False, default='present', choices=['present', 'absent']),
+ state=dict(required=False, default="present", choices=["present", "absent"]),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff())
+ connection = module.client("s3", retry_decorator=AWSRetry.jittered_backoff())
state = module.params.get("state")
- if state == 'present':
+ if state == "present":
enable_bucket_logging(connection, module)
- elif state == 'absent':
+ elif state == "absent":
disable_bucket_logging(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
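
verify_acls above rewrites the target bucket's ACL only when the S3 log-delivery group grant is missing, which is what keeps repeated runs idempotent. A condensed sketch of that check, assuming connection is a plain boto3 S3 client and ensure_log_delivery_grant is an illustrative name:

LOG_DELIVERY_GRANT = {
    "Grantee": {"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", "Type": "Group"},
    "Permission": "FULL_CONTROL",
}

def ensure_log_delivery_grant(connection, target_bucket):
    acl = connection.get_bucket_acl(Bucket=target_bucket)
    if LOG_DELIVERY_GRANT in acl["Grants"]:
        return False  # grant already present, nothing to change
    updated_acl = dict(acl)
    updated_acl["Grants"] = acl["Grants"] + [LOG_DELIVERY_GRANT]
    del updated_acl["ResponseMetadata"]  # response metadata is not valid ACL input
    connection.put_bucket_acl(Bucket=target_bucket, AccessControlPolicy=updated_acl)
    return True
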
diff --git a/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py b/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py
index dff566821..4e62b7bf8 100644
--- a/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py
+++ b/ansible_collections/community/aws/plugins/modules/s3_metrics_configuration.py
@@ -1,23 +1,22 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: s3_metrics_configuration
version_added: 1.3.0
short_description: Manage s3 bucket metrics configuration in AWS
description:
- - Manage s3 bucket metrics configuration in AWS which allows to get the CloudWatch request metrics for the objects in a bucket
-author: Dmytro Vorotyntsev (@vorotech)
+  - Manage the s3 bucket metrics configuration in AWS, which allows retrieval of CloudWatch request metrics for the objects in a bucket
+author:
+ - Dmytro Vorotyntsev (@vorotech)
notes:
- - This modules manages single metrics configuration, the s3 bucket might have up to 1,000 metrics configurations
- - To request metrics for the entire bucket, create a metrics configuration without a filter
- - Metrics configurations are necessary only to enable request metric, bucket-level daily storage metrics are always turned on
+  - This module manages a single metrics configuration; an s3 bucket can have up to 1,000 metrics configurations
+  - To request metrics for the entire bucket, create a metrics configuration without a filter
+  - Metrics configurations are necessary only to enable request metrics; bucket-level daily storage metrics are always turned on
options:
bucket_name:
description:
@@ -48,13 +47,14 @@ options:
choices: ['present', 'absent']
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
+RETURN = r""" # """
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create a metrics configuration that enables metrics for an entire bucket
@@ -93,56 +93,47 @@ EXAMPLES = r'''
bucket_name: my-bucket
id: EntireBucket
state: absent
-
-'''
+"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def _create_metrics_configuration(mc_id, filter_prefix, filter_tags):
- payload = {
- 'Id': mc_id
- }
+ payload = {"Id": mc_id}
# Just a filter_prefix or just a single tag filter is a special case
if filter_prefix and not filter_tags:
- payload['Filter'] = {
- 'Prefix': filter_prefix
- }
+ payload["Filter"] = {"Prefix": filter_prefix}
elif not filter_prefix and len(filter_tags) == 1:
- payload['Filter'] = {
- 'Tag': ansible_dict_to_boto3_tag_list(filter_tags)[0]
- }
+ payload["Filter"] = {"Tag": ansible_dict_to_boto3_tag_list(filter_tags)[0]}
# Otherwise we need to use 'And'
elif filter_tags:
- payload['Filter'] = {
- 'And': {
- 'Tags': ansible_dict_to_boto3_tag_list(filter_tags)
- }
- }
+ payload["Filter"] = {"And": {"Tags": ansible_dict_to_boto3_tag_list(filter_tags)}}
if filter_prefix:
- payload['Filter']['And']['Prefix'] = filter_prefix
+ payload["Filter"]["And"]["Prefix"] = filter_prefix
return payload
def create_or_update_metrics_configuration(client, module):
- bucket_name = module.params.get('bucket_name')
- mc_id = module.params.get('id')
- filter_prefix = module.params.get('filter_prefix')
- filter_tags = module.params.get('filter_tags')
+ bucket_name = module.params.get("bucket_name")
+ mc_id = module.params.get("id")
+ filter_prefix = module.params.get("filter_prefix")
+ filter_tags = module.params.get("filter_tags")
try:
response = client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id)
- metrics_configuration = response['MetricsConfiguration']
- except is_boto3_error_code('NoSuchConfiguration'):
+ metrics_configuration = response["MetricsConfiguration"]
+ except is_boto3_error_code("NoSuchConfiguration"):
metrics_configuration = None
except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to get bucket metrics configuration")
@@ -158,24 +149,21 @@ def create_or_update_metrics_configuration(client, module):
try:
client.put_bucket_metrics_configuration(
- aws_retry=True,
- Bucket=bucket_name,
- Id=mc_id,
- MetricsConfiguration=new_configuration
+ aws_retry=True, Bucket=bucket_name, Id=mc_id, MetricsConfiguration=new_configuration
)
except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Failed to put bucket metrics configuration '%s'" % mc_id)
+ module.fail_json_aws(e, msg=f"Failed to put bucket metrics configuration '{mc_id}'")
module.exit_json(changed=True)
def delete_metrics_configuration(client, module):
- bucket_name = module.params.get('bucket_name')
- mc_id = module.params.get('id')
+ bucket_name = module.params.get("bucket_name")
+ mc_id = module.params.get("id")
try:
client.get_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id)
- except is_boto3_error_code('NoSuchConfiguration'):
+ except is_boto3_error_code("NoSuchConfiguration"):
module.exit_json(changed=False)
except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to get bucket metrics configuration")
@@ -185,39 +173,36 @@ def delete_metrics_configuration(client, module):
try:
client.delete_bucket_metrics_configuration(aws_retry=True, Bucket=bucket_name, Id=mc_id)
- except is_boto3_error_code('NoSuchConfiguration'):
+ except is_boto3_error_code("NoSuchConfiguration"):
module.exit_json(changed=False)
except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Failed to delete bucket metrics configuration '%s'" % mc_id)
+ module.fail_json_aws(e, msg=f"Failed to delete bucket metrics configuration '{mc_id}'")
module.exit_json(changed=True)
def main():
argument_spec = dict(
- bucket_name=dict(type='str', required=True),
- id=dict(type='str', required=True),
- filter_prefix=dict(type='str', required=False),
- filter_tags=dict(default={}, type='dict', required=False, aliases=['filter_tag']),
- state=dict(default='present', type='str', choices=['present', 'absent']),
- )
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
+ bucket_name=dict(type="str", required=True),
+ id=dict(type="str", required=True),
+ filter_prefix=dict(type="str", required=False),
+ filter_tags=dict(default={}, type="dict", required=False, aliases=["filter_tag"]),
+ state=dict(default="present", type="str", choices=["present", "absent"]),
)
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- state = module.params.get('state')
+ state = module.params.get("state")
try:
- client = module.client('s3', retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3))
+ client = module.client("s3", retry_decorator=AWSRetry.exponential_backoff(retries=10, delay=3))
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- if state == 'present':
+ if state == "present":
create_or_update_metrics_configuration(client, module)
- elif state == 'absent':
+ elif state == "absent":
delete_metrics_configuration(client, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
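
_create_metrics_configuration above maps the module options onto the three filter shapes the S3 metrics API accepts: a bare Prefix, a bare Tag, or an And wrapper for anything combined. A self-contained restatement of that branching (build_metrics_filter is an illustrative name, not part of the module):

def build_metrics_filter(mc_id, prefix=None, tags=None):
    payload = {"Id": mc_id}
    tag_list = [{"Key": k, "Value": v} for k, v in (tags or {}).items()]
    if prefix and not tag_list:
        payload["Filter"] = {"Prefix": prefix}  # prefix-only special case
    elif not prefix and len(tag_list) == 1:
        payload["Filter"] = {"Tag": tag_list[0]}  # single-tag special case
    elif tag_list:
        payload["Filter"] = {"And": {"Tags": tag_list}}  # everything else needs 'And'
        if prefix:
            payload["Filter"]["And"]["Prefix"] = prefix
    return payload

print(build_metrics_filter("EntireBucket"))                # no filter: whole bucket
print(build_metrics_filter("Docs", prefix="documents/"))   # {"Filter": {"Prefix": ...}}
print(build_metrics_filter("Prod", tags={"env": "prod"}))  # {"Filter": {"Tag": ...}}
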
diff --git a/ansible_collections/community/aws/plugins/modules/s3_sync.py b/ansible_collections/community/aws/plugins/modules/s3_sync.py
index 80e3db0bd..36809ed2f 100644
--- a/ansible_collections/community/aws/plugins/modules/s3_sync.py
+++ b/ansible_collections/community/aws/plugins/modules/s3_sync.py
@@ -1,31 +1,17 @@
#!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
---
module: s3_sync
version_added: 1.0.0
short_description: Efficiently upload multiple files to S3
description:
- - The S3 module is great, but it is very slow for a large volume of files- even a dozen will be noticeable. In addition to speed, it handles globbing,
- inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping.
+- The S3 module is great, but it is very slow for a large volume of files; even a dozen will be noticeable. In addition to speed, it handles globbing,
+ inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping.
options:
mode:
description:
@@ -127,15 +113,15 @@ options:
default: false
type: bool
-author: Ted Timmons (@tedder)
+author:
+- Ted Timmons (@tedder)
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
+- amazon.aws.common.modules
+- amazon.aws.region.modules
- amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: basic upload
community.aws.s3_sync:
bucket: tedder
@@ -166,9 +152,9 @@ EXAMPLES = '''
storage_class: "GLACIER"
include: "*"
exclude: "*.txt,.*"
-'''
+"""
-RETURN = '''
+RETURN = r"""
filelist_initial:
description: file listing (dicts) from initial globbing
returned: always
@@ -241,7 +227,7 @@ uploads:
"whytime": "1477931637 / 1477931489"
}]
-'''
+"""
import datetime
import fnmatch
@@ -251,6 +237,7 @@ import stat as osstat # os.stat constants
try:
from dateutil import tz
+
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
@@ -262,11 +249,10 @@ except ImportError:
from ansible.module_utils._text import to_text
-# import module snippets
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.community.aws.plugins.module_utils.etag import calculate_multipart_etag
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def gather_files(fileroot, include=None, exclude=None):
@@ -275,25 +261,27 @@ def gather_files(fileroot, include=None, exclude=None):
if os.path.isfile(fileroot):
fullpath = fileroot
fstat = os.stat(fullpath)
- path_array = fileroot.split('/')
+ path_array = fileroot.split("/")
chopped_path = path_array[-1]
f_size = fstat[osstat.ST_SIZE]
f_modified_epoch = fstat[osstat.ST_MTIME]
- ret.append({
- 'fullpath': fullpath,
- 'chopped_path': chopped_path,
- 'modified_epoch': f_modified_epoch,
- 'bytes': f_size,
- })
+ ret.append(
+ {
+ "fullpath": fullpath,
+ "chopped_path": chopped_path,
+ "modified_epoch": f_modified_epoch,
+ "bytes": f_size,
+ }
+ )
else:
- for (dirpath, dirnames, filenames) in os.walk(fileroot):
+ for dirpath, dirnames, filenames in os.walk(fileroot):
for fn in filenames:
fullpath = os.path.join(dirpath, fn)
# include/exclude
if include:
found = False
- for x in include.split(','):
+ for x in include.split(","):
if fnmatch.fnmatch(fn, x):
found = True
if not found:
@@ -302,7 +290,7 @@ def gather_files(fileroot, include=None, exclude=None):
if exclude:
found = False
- for x in exclude.split(','):
+ for x in exclude.split(","):
if fnmatch.fnmatch(fn, x):
found = True
if found:
@@ -313,36 +301,38 @@ def gather_files(fileroot, include=None, exclude=None):
fstat = os.stat(fullpath)
f_size = fstat[osstat.ST_SIZE]
f_modified_epoch = fstat[osstat.ST_MTIME]
- ret.append({
- 'fullpath': fullpath,
- 'chopped_path': chopped_path,
- 'modified_epoch': f_modified_epoch,
- 'bytes': f_size,
- })
+ ret.append(
+ {
+ "fullpath": fullpath,
+ "chopped_path": chopped_path,
+ "modified_epoch": f_modified_epoch,
+ "bytes": f_size,
+ }
+ )
# dirpath = path *to* the directory
# dirnames = subdirs *in* our directory
# filenames
return ret
-def calculate_s3_path(filelist, key_prefix=''):
+def calculate_s3_path(filelist, key_prefix=""):
ret = []
for fileentry in filelist:
# don't modify the input dict
retentry = fileentry.copy()
- retentry['s3_path'] = os.path.join(key_prefix, fileentry['chopped_path'])
+ retentry["s3_path"] = os.path.join(key_prefix, fileentry["chopped_path"])
ret.append(retentry)
return ret
-def calculate_local_etag(filelist, key_prefix=''):
- '''Really, "calculate md5", but since AWS uses their own format, we'll just call
- it a "local etag". TODO optimization: only calculate if remote key exists.'''
+def calculate_local_etag(filelist, key_prefix=""):
+ """Really, "calculate md5", but since AWS uses their own format, we'll just call
+ it a "local etag". TODO optimization: only calculate if remote key exists."""
ret = []
for fileentry in filelist:
# don't modify the input dict
retentry = fileentry.copy()
- retentry['local_etag'] = calculate_multipart_etag(fileentry['fullpath'])
+ retentry["local_etag"] = calculate_multipart_etag(fileentry["fullpath"])
ret.append(retentry)
return ret
@@ -351,20 +341,20 @@ def determine_mimetypes(filelist, override_map):
ret = []
for fileentry in filelist:
retentry = fileentry.copy()
- localfile = fileentry['fullpath']
+ localfile = fileentry["fullpath"]
# reminder: file extension is '.txt', not 'txt'.
file_extension = os.path.splitext(localfile)[1]
if override_map and override_map.get(file_extension):
# override? use it.
- retentry['mime_type'] = override_map[file_extension]
+ retentry["mime_type"] = override_map[file_extension]
else:
# else sniff it
- retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False)
+ retentry["mime_type"], retentry["encoding"] = mimetypes.guess_type(localfile, strict=False)
# might be None or '' from one of the above. Not a great type but better than nothing.
- if not retentry['mime_type']:
- retentry['mime_type'] = 'application/octet-stream'
+ if not retentry["mime_type"]:
+ retentry["mime_type"] = "application/octet-stream"
ret.append(retentry)
@@ -376,10 +366,10 @@ def head_s3(s3, bucket, s3keys):
for entry in s3keys:
retentry = entry.copy()
try:
- retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path'])
+ retentry["s3_head"] = s3.head_object(Bucket=bucket, Key=entry["s3_path"])
# 404 (Missing) - File doesn't exist, we'll need to upload
# 403 (Denied) - Sometimes we can write but not read, assume we'll need to upload
- except is_boto3_error_code(['404', '403']):
+ except is_boto3_error_code(["404", "403"]):
pass
retkeys.append(retentry)
return retkeys
@@ -389,106 +379,127 @@ def filter_list(s3, bucket, s3filelist, strategy):
keeplist = list(s3filelist)
for e in keeplist:
- e['_strategy'] = strategy
+ e["_strategy"] = strategy
# init/fetch info from S3 if we're going to use it for comparisons
- if not strategy == 'force':
+ if not strategy == "force":
keeplist = head_s3(s3, bucket, s3filelist)
# now actually run the strategies
- if strategy == 'checksum':
+ if strategy == "checksum":
for entry in keeplist:
- if entry.get('s3_head'):
+ if entry.get("s3_head"):
# since we have a remote s3 object, compare the values.
- if entry['s3_head']['ETag'] == entry['local_etag']:
+ if entry["s3_head"]["ETag"] == entry["local_etag"]:
# files match, so remove the entry
- entry['skip_flag'] = True
+ entry["skip_flag"] = True
else:
# file etags don't match, keep the entry.
pass
else: # we don't have an etag, so we'll keep it.
pass
- elif strategy == 'date_size':
+ elif strategy == "date_size":
for entry in keeplist:
- if entry.get('s3_head'):
+ if entry.get("s3_head"):
# fstat = entry['stat']
- local_modified_epoch = entry['modified_epoch']
- local_size = entry['bytes']
+ local_modified_epoch = entry["modified_epoch"]
+ local_size = entry["bytes"]
# py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward.
# remote_modified_epoch = entry['s3_head']['LastModified'].timestamp()
- remote_modified_datetime = entry['s3_head']['LastModified']
- delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()))
+ remote_modified_datetime = entry["s3_head"]["LastModified"]
+ delta = remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc())
remote_modified_epoch = delta.seconds + (delta.days * 86400)
- remote_size = entry['s3_head']['ContentLength']
+ remote_size = entry["s3_head"]["ContentLength"]
- entry['whytime'] = '{0} / {1}'.format(local_modified_epoch, remote_modified_epoch)
- entry['whysize'] = '{0} / {1}'.format(local_size, remote_size)
+ entry["whytime"] = f"{local_modified_epoch} / {remote_modified_epoch}"
+ entry["whysize"] = f"{local_size} / {remote_size}"
if local_modified_epoch <= remote_modified_epoch and local_size == remote_size:
- entry['skip_flag'] = True
+ entry["skip_flag"] = True
else:
- entry['why'] = "no s3_head"
+ entry["why"] = "no s3_head"
# else: probably 'force'. Basically we don't skip with any with other strategies.
else:
pass
# prune 'please skip' entries, if any.
- return [x for x in keeplist if not x.get('skip_flag')]
+ return [x for x in keeplist if not x.get("skip_flag")]
def upload_files(s3, bucket, filelist, params):
ret = []
for entry in filelist:
- args = {
- 'ContentType': entry['mime_type']
- }
- if params.get('permission'):
- args['ACL'] = params['permission']
- if params.get('cache_control'):
- args['CacheControl'] = params['cache_control']
- if params.get('storage_class'):
- args['StorageClass'] = params['storage_class']
+ args = {"ContentType": entry["mime_type"]}
+ if params.get("permission"):
+ args["ACL"] = params["permission"]
+ if params.get("cache_control"):
+ args["CacheControl"] = params["cache_control"]
+ if params.get("storage_class"):
+ args["StorageClass"] = params["storage_class"]
# if this fails exception is caught in main()
- s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None)
+ s3.upload_file(entry["fullpath"], bucket, entry["s3_path"], ExtraArgs=args, Callback=None, Config=None)
ret.append(entry)
return ret
def remove_files(s3, sourcelist, params):
- bucket = params.get('bucket')
- key_prefix = params.get('key_prefix')
- paginator = s3.get_paginator('list_objects_v2')
- current_keys = set(x['Key'] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get('Contents', []))
- keep_keys = set(to_text(source_file['s3_path']) for source_file in sourcelist)
+ bucket = params.get("bucket")
+ key_prefix = params.get("key_prefix")
+ paginator = s3.get_paginator("list_objects_v2")
+ current_keys = set(
+ x["Key"] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get("Contents", [])
+ )
+ keep_keys = set(to_text(source_file["s3_path"]) for source_file in sourcelist)
delete_keys = list(current_keys - keep_keys)
# can delete 1000 objects at a time
- groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)]
+ groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)] # fmt:skip
for keys in groups_of_keys:
- s3.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key} for key in keys]})
+ s3.delete_objects(Bucket=bucket, Delete={"Objects": [{"Key": key} for key in keys]})
return delete_keys
def main():
argument_spec = dict(
- mode=dict(choices=['push'], default='push'),
- file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'),
+ mode=dict(choices=["push"], default="push"),
+ file_change_strategy=dict(choices=["force", "date_size", "checksum"], default="date_size"),
bucket=dict(required=True),
- key_prefix=dict(required=False, default='', no_log=False),
- file_root=dict(required=True, type='path'),
- permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read',
- 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
- mime_map=dict(required=False, type='dict'),
+ key_prefix=dict(required=False, default="", no_log=False),
+ file_root=dict(required=True, type="path"),
+ permission=dict(
+ required=False,
+ choices=[
+ "private",
+ "public-read",
+ "public-read-write",
+ "authenticated-read",
+ "aws-exec-read",
+ "bucket-owner-read",
+ "bucket-owner-full-control",
+ ],
+ ),
+ mime_map=dict(required=False, type="dict"),
exclude=dict(required=False, default=".*"),
include=dict(required=False, default="*"),
- cache_control=dict(required=False, default=''),
- delete=dict(required=False, type='bool', default=False),
- storage_class=dict(required=False, default='STANDARD',
- choices=['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', 'ONEZONE_IA',
- 'INTELLIGENT_TIERING', 'GLACIER', 'DEEP_ARCHIVE', 'OUTPOSTS']),
+ cache_control=dict(required=False, default=""),
+ delete=dict(required=False, type="bool", default=False),
+ storage_class=dict(
+ required=False,
+ default="STANDARD",
+ choices=[
+ "STANDARD",
+ "REDUCED_REDUNDANCY",
+ "STANDARD_IA",
+ "ONEZONE_IA",
+ "INTELLIGENT_TIERING",
+ "GLACIER",
+ "DEEP_ARCHIVE",
+ "OUTPOSTS",
+ ],
+ ),
# future options: encoding, metadata, retries
)
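
The date_size strategy in filter_list above computes the remote object's epoch by hand (delta.seconds + delta.days * 86400) for Python 2 compatibility, as the comment there notes. On Python 3 the same value comes straight from the tz-aware datetime; a quick check of the equivalence, assuming dateutil, which the module already requires (the timestamp value is made up):

import datetime

from dateutil import tz

remote_modified = datetime.datetime(2016, 10, 31, 17, 13, 57, tzinfo=tz.tzutc())

delta = remote_modified - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc())
epoch_compat = delta.seconds + delta.days * 86400  # the module's py2-safe form

assert epoch_compat == int(remote_modified.timestamp())  # py3 shortcut
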
@@ -497,36 +508,43 @@ def main():
)
if not HAS_DATEUTIL:
- module.fail_json(msg='dateutil required for this module')
+ module.fail_json(msg="dateutil required for this module")
result = {}
- mode = module.params['mode']
+ mode = module.params["mode"]
try:
- s3 = module.client('s3')
+ s3 = module.client("s3")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
- if mode == 'push':
+ if mode == "push":
try:
- result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include'])
- result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map'))
- result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix'])
+ result["filelist_initial"] = gather_files(
+ module.params["file_root"], exclude=module.params["exclude"], include=module.params["include"]
+ )
+ result["filelist_typed"] = determine_mimetypes(result["filelist_initial"], module.params.get("mime_map"))
+ result["filelist_s3"] = calculate_s3_path(result["filelist_typed"], module.params["key_prefix"])
try:
- result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3'])
+ result["filelist_local_etag"] = calculate_local_etag(result["filelist_s3"])
except ValueError as e:
- if module.params['file_change_strategy'] == 'checksum':
- module.fail_json_aws(e, 'Unable to calculate checksum. If running in FIPS mode, you may need to use another file_change_strategy')
- result['filelist_local_etag'] = result['filelist_s3'].copy()
- result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy'])
- result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params)
-
- if module.params['delete']:
- result['removed'] = remove_files(s3, result['filelist_local_etag'], module.params)
+ if module.params["file_change_strategy"] == "checksum":
+ module.fail_json_aws(
+ e,
+ "Unable to calculate checksum. If running in FIPS mode, you may need to use another file_change_strategy",
+ )
+ result["filelist_local_etag"] = result["filelist_s3"].copy()
+ result["filelist_actionable"] = filter_list(
+ s3, module.params["bucket"], result["filelist_local_etag"], module.params["file_change_strategy"]
+ )
+ result["uploads"] = upload_files(s3, module.params["bucket"], result["filelist_actionable"], module.params)
+
+ if module.params["delete"]:
+ result["removed"] = remove_files(s3, result["filelist_local_etag"], module.params)
# mark changed if we actually upload something.
- if result.get('uploads') or result.get('removed'):
- result['changed'] = True
+ if result.get("uploads") or result.get("removed"):
+ result["changed"] = True
# result.update(filelist=actionable_filelist)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to push file")
@@ -534,5 +552,5 @@ def main():
module.exit_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
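
remove_files above deletes stale keys in slices of 1,000 because the S3 DeleteObjects API accepts at most 1,000 objects per request. The slicing idiom in isolation:

def chunked(seq, size=1000):
    # Yield consecutive slices of at most `size` items.
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

keys = [f"logs/{n}.gz" for n in range(2500)]
assert [len(batch) for batch in chunked(keys)] == [1000, 1000, 500]
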
diff --git a/ansible_collections/community/aws/plugins/modules/s3_website.py b/ansible_collections/community/aws/plugins/modules/s3_website.py
index 81d3169cd..1c212d117 100644
--- a/ansible_collections/community/aws/plugins/modules/s3_website.py
+++ b/ansible_collections/community/aws/plugins/modules/s3_website.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: s3_website
version_added: 1.0.0
short_description: Configure an s3 bucket as a website
description:
- - Configure an s3 bucket as a website
-author: Rob White (@wimnat)
+ - Configure an s3 bucket as a website
+author:
+ - Rob White (@wimnat)
options:
name:
description:
@@ -44,13 +43,12 @@ options:
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Configure an s3 bucket to redirect all requests to example.com
@@ -70,10 +68,9 @@ EXAMPLES = '''
suffix: home.htm
error_key: errors/404.htm
state: present
+"""
-'''
-
-RETURN = '''
+RETURN = r"""
index_document:
description: index document
type: complex
@@ -157,7 +154,7 @@ routing_rules:
returned: when routing rule present
type: str
sample: documents/
-'''
+"""
import time
@@ -168,45 +165,43 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-def _create_redirect_dict(url):
+def _create_redirect_dict(url):
redirect_dict = {}
- url_split = url.split(':')
+ url_split = url.split(":")
# Did we split anything?
if len(url_split) == 2:
- redirect_dict[u'Protocol'] = url_split[0]
- redirect_dict[u'HostName'] = url_split[1].replace('//', '')
+ redirect_dict["Protocol"] = url_split[0]
+ redirect_dict["HostName"] = url_split[1].replace("//", "")
elif len(url_split) == 1:
- redirect_dict[u'HostName'] = url_split[0]
+ redirect_dict["HostName"] = url_split[0]
else:
- raise ValueError('Redirect URL appears invalid')
+ raise ValueError("Redirect URL appears invalid")
return redirect_dict
def _create_website_configuration(suffix, error_key, redirect_all_requests):
-
website_configuration = {}
if error_key is not None:
- website_configuration['ErrorDocument'] = {'Key': error_key}
+ website_configuration["ErrorDocument"] = {"Key": error_key}
if suffix is not None:
- website_configuration['IndexDocument'] = {'Suffix': suffix}
+ website_configuration["IndexDocument"] = {"Suffix": suffix}
if redirect_all_requests is not None:
- website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
+ website_configuration["RedirectAllRequestsTo"] = _create_redirect_dict(redirect_all_requests)
return website_configuration
def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
-
bucket_name = module.params.get("name")
redirect_all_requests = module.params.get("redirect_all_requests")
# If redirect_all_requests is set then don't use the default suffix that has been set
@@ -224,14 +219,19 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m
try:
website_config = client_connection.get_bucket_website(Bucket=bucket_name)
- except is_boto3_error_code('NoSuchWebsiteConfiguration'):
+ except is_boto3_error_code("NoSuchWebsiteConfiguration"):
website_config = None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to get website configuration")
if website_config is None:
try:
- bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ bucket_website.put(
+ WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)
+ )
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to set bucket website configuration")
@@ -239,18 +239,26 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m
module.fail_json(msg=str(e))
else:
try:
- if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
- (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
- (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):
-
+ if (
+ (suffix is not None and website_config["IndexDocument"]["Suffix"] != suffix)
+ or (error_key is not None and website_config["ErrorDocument"]["Key"] != error_key)
+ or (
+ redirect_all_requests is not None
+ and website_config["RedirectAllRequestsTo"] != _create_redirect_dict(redirect_all_requests)
+ )
+ ):
try:
- bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ bucket_website.put(
+ WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)
+ )
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to update bucket website configuration")
except KeyError as e:
try:
- bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
+ bucket_website.put(
+ WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)
+ )
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to update bucket website configuration")
@@ -265,15 +273,17 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m
def disable_bucket_as_website(client_connection, module):
-
changed = False
bucket_name = module.params.get("name")
try:
client_connection.get_bucket_website(Bucket=bucket_name)
- except is_boto3_error_code('NoSuchWebsiteConfiguration'):
+ except is_boto3_error_code("NoSuchWebsiteConfiguration"):
module.exit_json(changed=changed)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to get bucket website")
try:
@@ -286,36 +296,35 @@ def disable_bucket_as_website(client_connection, module):
def main():
-
argument_spec = dict(
- name=dict(type='str', required=True),
- state=dict(type='str', required=True, choices=['present', 'absent']),
- suffix=dict(type='str', required=False, default='index.html'),
- error_key=dict(type='str', required=False, no_log=False),
- redirect_all_requests=dict(type='str', required=False),
+ name=dict(type="str", required=True),
+ state=dict(type="str", required=True, choices=["present", "absent"]),
+ suffix=dict(type="str", required=False, default="index.html"),
+ error_key=dict(type="str", required=False, no_log=False),
+ redirect_all_requests=dict(type="str", required=False),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
mutually_exclusive=[
- ['redirect_all_requests', 'suffix'],
- ['redirect_all_requests', 'error_key']
+ ["redirect_all_requests", "suffix"],
+ ["redirect_all_requests", "error_key"],
],
)
try:
- client_connection = module.client('s3')
- resource_connection = module.resource('s3')
+ client_connection = module.client("s3")
+ resource_connection = module.resource("s3")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
state = module.params.get("state")
- if state == 'present':
+ if state == "present":
enable_or_update_bucket_as_website(client_connection, resource_connection, module)
- elif state == 'absent':
+ elif state == "absent":
disable_bucket_as_website(client_connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
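
_create_redirect_dict above derives the RedirectAllRequestsTo payload by splitting the target on ':' to separate an optional protocol from the hostname. The same parsing in isolation (parse_redirect is an illustrative name; example values are made up):

def parse_redirect(url):
    parts = url.split(":")
    if len(parts) == 2:
        # "https://example.com" -> protocol plus host, '//' stripped
        return {"Protocol": parts[0], "HostName": parts[1].replace("//", "")}
    if len(parts) == 1:
        return {"HostName": parts[0]}  # bare hostname, no protocol given
    raise ValueError("Redirect URL appears invalid")

assert parse_redirect("https://example.com") == {"Protocol": "https", "HostName": "example.com"}
assert parse_redirect("example.com") == {"HostName": "example.com"}
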
diff --git a/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py b/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py
index 851746189..fb2ff8ebe 100644
--- a/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py
+++ b/ansible_collections/community/aws/plugins/modules/secretsmanager_secret.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2018, REY Remi
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: secretsmanager_secret
version_added: 1.0.0
@@ -107,16 +105,16 @@ options:
- Specifies the number of days between automatic scheduled rotations of the secret.
default: 30
type: int
-extends_documentation_fragment:
- - amazon.aws.ec2
- - amazon.aws.aws
- - amazon.aws.boto3
- - amazon.aws.tags
notes:
- Support for I(purge_tags) was added in release 4.0.0.
-'''
+extends_documentation_fragment:
+ - amazon.aws.region.modules
+ - amazon.aws.common.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Add string to AWS Secrets Manager
community.aws.secretsmanager_secret:
name: 'test_secret_string'
@@ -146,9 +144,9 @@ EXAMPLES = r'''
secret_type: 'string'
secret: "{{ lookup('community.general.random_string', length=16, special=false) }}"
overwrite: false
-'''
+"""
-RETURN = r'''
+RETURN = r"""
secret:
description: The secret information
returned: always
@@ -212,27 +210,44 @@ secret:
returned: when the secret has tags
example: {'MyTagName': 'Some Value'}
version_added: 4.0.0
-'''
+"""
-from ansible.module_utils._text import to_bytes
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
-from traceback import format_exc
import json
+from traceback import format_exc
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # handled by AnsibleAWSModule
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
class Secret(object):
"""An object representation of the Secret described by the self.module args"""
+
def __init__(
- self, name, secret_type, secret, resource_policy=None, description="", kms_key_id=None,
- tags=None, lambda_arn=None, rotation_interval=None, replica_regions=None,
+ self,
+ name,
+ secret_type,
+ secret,
+ resource_policy=None,
+ description="",
+ kms_key_id=None,
+ tags=None,
+ lambda_arn=None,
+ rotation_interval=None,
+ replica_regions=None,
):
self.name = name
self.description = description
@@ -253,9 +268,7 @@ class Secret(object):
@property
def create_args(self):
- args = {
- "Name": self.name
- }
+ args = {"Name": self.name}
if self.description:
args["Description"] = self.description
if self.kms_key_id:
@@ -264,10 +277,9 @@ class Secret(object):
add_replica_regions = []
for replica in self.replica_regions:
if replica["kms_key_id"]:
- add_replica_regions.append({'Region': replica["region"],
- 'KmsKeyId': replica["kms_key_id"]})
+ add_replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]})
else:
- add_replica_regions.append({'Region': replica["region"]})
+ add_replica_regions.append({"Region": replica["region"]})
args["AddReplicaRegions"] = add_replica_regions
if self.tags:
args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags)
@@ -276,9 +288,7 @@ class Secret(object):
@property
def update_args(self):
- args = {
- "SecretId": self.name
- }
+ args = {"SecretId": self.name}
if self.description:
args["Description"] = self.description
if self.kms_key_id:
@@ -288,9 +298,7 @@ class Secret(object):
@property
def secret_resource_policy_args(self):
- args = {
- "SecretId": self.name
- }
+ args = {"SecretId": self.name}
if self.resource_policy:
args["ResourcePolicy"] = self.resource_policy
return args
@@ -310,7 +318,7 @@ class SecretsManagerInterface(object):
def __init__(self, module):
self.module = module
- self.client = self.module.client('secretsmanager')
+ self.client = self.module.client("secretsmanager")
def get_secret(self, name):
try:
@@ -358,7 +366,7 @@ class SecretsManagerInterface(object):
try:
json.loads(secret.secret_resource_policy_args.get("ResourcePolicy"))
except (TypeError, ValueError) as e:
- self.module.fail_json(msg="Failed to parse resource policy as JSON: %s" % (str(e)), exception=format_exc())
+ self.module.fail_json(msg=f"Failed to parse resource policy as JSON: {str(e)}", exception=format_exc())
try:
response = self.client.put_resource_policy(**secret.secret_resource_policy_args)
@@ -371,9 +379,7 @@ class SecretsManagerInterface(object):
self.module.exit_json(changed=True)
try:
replica_regions = []
- response = self.client.remove_regions_from_replication(
- SecretId=name,
- RemoveReplicaRegions=regions)
+ response = self.client.remove_regions_from_replication(SecretId=name, RemoveReplicaRegions=regions)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Failed to replicate secret")
return response
@@ -385,12 +391,10 @@ class SecretsManagerInterface(object):
replica_regions = []
for replica in regions:
if replica["kms_key_id"]:
- replica_regions.append({'Region': replica["region"], 'KmsKeyId': replica["kms_key_id"]})
+ replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]})
else:
- replica_regions.append({'Region': replica["region"]})
- response = self.client.replicate_secret_to_regions(
- SecretId=name,
- AddReplicaRegions=replica_regions)
+ replica_regions.append({"Region": replica["region"]})
+ response = self.client.replicate_secret_to_regions(SecretId=name, AddReplicaRegions=replica_regions)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Failed to replicate secret")
return response
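
Both replication paths above assemble the same AddReplicaRegions payload, attaching KmsKeyId only when the replica entry specifies one. The mapping on its own, with to_replica_regions as an illustrative helper name:

def to_replica_regions(replicas):
    regions = []
    for replica in replicas:
        entry = {"Region": replica["region"]}
        if replica.get("kms_key_id"):  # include the key only when set
            entry["KmsKeyId"] = replica["kms_key_id"]
        regions.append(entry)
    return regions

assert to_replica_regions([{"region": "eu-west-1", "kms_key_id": None}]) == [{"Region": "eu-west-1"}]
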
@@ -431,7 +435,8 @@ class SecretsManagerInterface(object):
response = self.client.rotate_secret(
SecretId=secret.name,
RotationLambdaARN=secret.rotation_lambda_arn,
- RotationRules=secret.rotation_rules)
+ RotationRules=secret.rotation_rules,
+ )
except (BotoCoreError, ClientError) as e:
             self.module.fail_json_aws(e, msg="Failed to rotate secret")
else:
@@ -471,7 +476,7 @@ class SecretsManagerInterface(object):
if desired_secret.kms_key_id != current_secret.get("KmsKeyId"):
return False
current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name"))
- if desired_secret.secret_type == 'SecretBinary':
+ if desired_secret.secret_type == "SecretBinary":
desired_value = to_bytes(desired_secret.secret)
else:
desired_value = desired_secret.secret
@@ -532,65 +537,69 @@ def compare_regions(desired_secret, current_secret):
def main():
replica_args = dict(
- region=dict(type='str', required=True),
- kms_key_id=dict(type='str', required=False),
+ region=dict(type="str", required=True),
+ kms_key_id=dict(type="str", required=False),
)
module = AnsibleAWSModule(
argument_spec={
- 'name': dict(required=True),
- 'state': dict(choices=['present', 'absent'], default='present'),
- 'overwrite': dict(type='bool', default=True),
- 'description': dict(default=""),
- 'replica': dict(type='list', elements='dict', options=replica_args),
- 'kms_key_id': dict(),
- 'secret_type': dict(choices=['binary', 'string'], default="string"),
- 'secret': dict(default="", no_log=True),
- 'json_secret': dict(type='json', no_log=True),
- 'resource_policy': dict(type='json', default=None),
- 'tags': dict(type='dict', default=None, aliases=['resource_tags']),
- 'purge_tags': dict(type='bool', default=True),
- 'rotation_lambda': dict(),
- 'rotation_interval': dict(type='int', default=30),
- 'recovery_window': dict(type='int', default=30),
+ "name": dict(required=True),
+ "state": dict(choices=["present", "absent"], default="present"),
+ "overwrite": dict(type="bool", default=True),
+ "description": dict(default=""),
+ "replica": dict(type="list", elements="dict", options=replica_args),
+ "kms_key_id": dict(),
+ "secret_type": dict(choices=["binary", "string"], default="string"),
+ "secret": dict(default="", no_log=True),
+ "json_secret": dict(type="json", no_log=True),
+ "resource_policy": dict(type="json", default=None),
+ "tags": dict(type="dict", default=None, aliases=["resource_tags"]),
+ "purge_tags": dict(type="bool", default=True),
+ "rotation_lambda": dict(),
+ "rotation_interval": dict(type="int", default=30),
+ "recovery_window": dict(type="int", default=30),
},
- mutually_exclusive=[['secret', 'json_secret']],
+ mutually_exclusive=[["secret", "json_secret"]],
supports_check_mode=True,
)
changed = False
- state = module.params.get('state')
+ state = module.params.get("state")
secrets_mgr = SecretsManagerInterface(module)
- recovery_window = module.params.get('recovery_window')
+ recovery_window = module.params.get("recovery_window")
secret = Secret(
- module.params.get('name'),
- module.params.get('secret_type'),
- module.params.get('secret') or module.params.get('json_secret'),
- description=module.params.get('description'),
- replica_regions=module.params.get('replica'),
- kms_key_id=module.params.get('kms_key_id'),
- resource_policy=module.params.get('resource_policy'),
- tags=module.params.get('tags'),
- lambda_arn=module.params.get('rotation_lambda'),
- rotation_interval=module.params.get('rotation_interval')
+ module.params.get("name"),
+ module.params.get("secret_type"),
+ module.params.get("secret") or module.params.get("json_secret"),
+ description=module.params.get("description"),
+ replica_regions=module.params.get("replica"),
+ kms_key_id=module.params.get("kms_key_id"),
+ resource_policy=module.params.get("resource_policy"),
+ tags=module.params.get("tags"),
+ lambda_arn=module.params.get("rotation_lambda"),
+ rotation_interval=module.params.get("rotation_interval"),
)
- purge_tags = module.params.get('purge_tags')
+ purge_tags = module.params.get("purge_tags")
current_secret = secrets_mgr.get_secret(secret.name)
- if state == 'absent':
+ if state == "absent":
if current_secret:
if not current_secret.get("DeletedDate"):
- result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
+ result = camel_dict_to_snake_dict(
+ secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)
+ )
changed = True
elif current_secret.get("DeletedDate") and recovery_window == 0:
- result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
+ result = camel_dict_to_snake_dict(
+ secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)
+ )
changed = True
else:
result = "secret already scheduled for deletion"
else:
result = "secret does not exist"
- if state == 'present':
+ if state == "present":
if current_secret is None:
result = secrets_mgr.create_secret(secret)
if secret.resource_policy and result.get("ARN"):
@@ -602,7 +611,7 @@ def main():
secrets_mgr.restore_secret(secret.name)
changed = True
if not secrets_mgr.secrets_match(secret, current_secret):
- overwrite = module.params.get('overwrite')
+ overwrite = module.params.get("overwrite")
if overwrite:
result = secrets_mgr.update_secret(secret)
changed = True
@@ -619,8 +628,8 @@ def main():
result = secrets_mgr.put_resource_policy(secret)
changed = True
- if module.params.get('tags') is not None:
- current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', []))
+ if module.params.get("tags") is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(current_secret.get("Tags", []))
tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags, purge_tags)
if tags_to_add:
secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add))
@@ -638,12 +647,12 @@ def main():
changed = True
result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name))
- if result.get('tags', None) is not None:
- result['tags_dict'] = boto3_tag_list_to_ansible_dict(result.get('tags', []))
+ if result.get("tags", None) is not None:
+ result["tags_dict"] = boto3_tag_list_to_ansible_dict(result.get("tags", []))
result.pop("response_metadata")
module.exit_json(changed=changed, secret=result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
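
For reference, the state=absent branch above distinguishes scheduling a deletion from forcing one: a secret already scheduled for deletion is only removed again when recovery_window is 0. A hedged sketch of the underlying Secrets Manager calls, assuming (the helper itself sits outside these hunks) that recovery_window == 0 maps to ForceDeleteWithoutRecovery:

# Sketch of the deletion semantics handled above; the mapping of
# recovery_window == 0 to ForceDeleteWithoutRecovery is an assumption.
import boto3

client = boto3.client("secretsmanager")

def delete_secret(name, recovery_window):
    if recovery_window == 0:
        # Immediate, unrecoverable deletion; also the only way to remove
        # a secret that is already scheduled for deletion.
        return client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True)
    # Otherwise schedule deletion; the secret can be restored until the
    # window elapses (the API accepts 7-30 days).
    return client.delete_secret(SecretId=name, RecoveryWindowInDays=recovery_window)
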
diff --git a/ansible_collections/community/aws/plugins/modules/ses_identity.py b/ansible_collections/community/aws/plugins/modules/ses_identity.py
index 997692df6..785519bd3 100644
--- a/ansible_collections/community/aws/plugins/modules/ses_identity.py
+++ b/ansible_collections/community/aws/plugins/modules/ses_identity.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ses_identity
version_added: 1.0.0
@@ -86,14 +84,14 @@ options:
- Whether or not to enable feedback forwarding.
- This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
type: 'bool'
- default: True
+ default: true
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Ensure example@example.com email identity exists
@@ -117,7 +115,7 @@ EXAMPLES = '''
community.aws.sns_topic:
name: "complaints-topic"
state: present
- purge_subscriptions: False
+ purge_subscriptions: false
register: topic_info
- name: Deliver feedback to topic instead of owner email
@@ -126,11 +124,11 @@ EXAMPLES = '''
state: present
complaint_notifications:
topic: "{{ topic_info.sns_arn }}"
- include_headers: True
+ include_headers: true
bounce_notifications:
topic: "{{ topic_info.sns_arn }}"
- include_headers: False
- feedback_forwarding: False
+ include_headers: false
+ feedback_forwarding: false
# Create an SNS topic for delivery notifications and leave complaints
# being forwarded to the identity owner email
@@ -138,7 +136,7 @@ EXAMPLES = '''
community.aws.sns_topic:
name: "delivery-notifications-topic"
state: present
- purge_subscriptions: False
+ purge_subscriptions: false
register: topic_info
- name: Delivery notifications to topic
@@ -147,9 +145,9 @@ EXAMPLES = '''
state: present
delivery_notifications:
topic: "{{ topic_info.sns_arn }}"
-'''
+"""
-RETURN = '''
+RETURN = r"""
identity:
description: The identity being modified.
returned: success
@@ -217,19 +215,22 @@ notification_attributes:
headers_in_delivery_notifications_enabled:
description: Whether or not headers are included in messages delivered to the delivery topic.
type: bool
-'''
-
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+"""
import time
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10):
# Unpredictably get_identity_verification_attributes doesn't include the identity even when we've
@@ -241,8 +242,8 @@ def get_verification_attributes(connection, module, identity, retries=0, retryDe
try:
response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity))
- identity_verification = response['VerificationAttributes']
+ module.fail_json_aws(e, msg=f"Failed to retrieve identity verification attributes for {identity}")
+ identity_verification = response["VerificationAttributes"]
if identity in identity_verification:
break
time.sleep(retryDelay)
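
The loop above works around SES's eventual consistency: a freshly registered identity can be missing from get_identity_verification_attributes for a short while. A generic sketch of that retry-until-present pattern (the fetch callable and key are illustrative, not the module's exact code):

# Generic retry-until-present sketch for an eventually consistent read.
import time

def wait_until_present(fetch, key, retries=4, delay=10):
    for attempt in range(retries + 1):
        result = fetch()
        if key in result:
            return result[key]
        if attempt < retries:
            # The API sometimes omits a freshly registered item, so back
            # off and ask again rather than failing immediately.
            time.sleep(delay)
    return None
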
@@ -262,8 +263,8 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel
try:
response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity))
- notification_attributes = response['NotificationAttributes']
+ module.fail_json_aws(e, msg=f"Failed to retrieve identity notification attributes for {identity}")
+ notification_attributes = response["NotificationAttributes"]
    # No clear AWS docs on when this happens, but it appears sometimes identities are not included
    # in the notification attributes when the identity is first registered. Suspect that this is caused by
@@ -279,7 +280,7 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel
# something has gone very wrong.
if len(notification_attributes) != 0:
module.fail_json(
- msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
+ msg="Unexpected identity found in notification attributes, expected {0} but got {1!r}.".format(
identity,
notification_attributes.keys(),
)
@@ -291,46 +292,60 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel
def desired_topic(module, notification_type):
- arg_dict = module.params.get(notification_type.lower() + '_notifications')
+ arg_dict = module.params.get(notification_type.lower() + "_notifications")
if arg_dict:
- return arg_dict.get('topic', None)
+ return arg_dict.get("topic", None)
else:
return None
def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
- topic_key = notification_type + 'Topic'
+ # Not passing the parameter should not cause any changes.
+ if module.params.get(f"{notification_type.lower()}_notifications") is None:
+ return False
+
+ topic_key = notification_type + "Topic"
if identity_notifications is None:
# If there is no configuration for notifications cannot be being sent to topics
# hence assume None as the current state.
- current = None
+ current_topic = None
elif topic_key in identity_notifications:
- current = identity_notifications[topic_key]
+ current_topic = identity_notifications[topic_key]
else:
# If there is information on the notifications setup but no information on the
# particular notification topic it's pretty safe to assume there's no topic for
# this notification. AWS API docs suggest this information will always be
# included but best to be defensive
- current = None
+ current_topic = None
- required = desired_topic(module, notification_type)
+ required_topic = desired_topic(module, notification_type)
- if current != required:
+ if current_topic != required_topic:
try:
if not module.check_mode:
- connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True)
+ request_kwargs = {
+ "Identity": identity,
+ "NotificationType": notification_type,
+ "aws_retry": True,
+ }
+
+ # The topic has to be omitted from the request to disable the notification.
+ if required_topic is not None:
+ request_kwargs["SnsTopic"] = required_topic
+
+ connection.set_identity_notification_topic(**request_kwargs)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format(
- identity=identity,
- notification_type=notification_type,
- ))
+ module.fail_json_aws(
+ e,
+ msg=f"Failed to set identity notification topic for {identity} {notification_type}",
+ )
return True
return False
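
The new request_kwargs handling above relies on SetIdentityNotificationTopic clearing the topic when SnsTopic is absent, so "disable" is expressed by omitting the key rather than passing an empty value. A minimal sketch of the same pattern against boto3 (client construction and argument values are illustrative):

# Sketch of the "omit the key to disable" pattern introduced above.
import boto3

client = boto3.client("ses")

def set_topic(identity, notification_type, topic_arn):
    kwargs = {"Identity": identity, "NotificationType": notification_type}
    if topic_arn is not None:
        # Only include SnsTopic when enabling; leaving it out clears the
        # topic and disables the notification.
        kwargs["SnsTopic"] = topic_arn
    client.set_identity_notification_topic(**kwargs)
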
def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
- arg_dict = module.params.get(notification_type.lower() + '_notifications')
- header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
+ arg_dict = module.params.get(notification_type.lower() + "_notifications")
+ header_key = "HeadersIn" + notification_type + "NotificationsEnabled"
if identity_notifications is None:
        # If there is no configuration for topic notifications, headers cannot currently be
        # forwarded, hence assume false.
@@ -343,21 +358,21 @@ def update_notification_topic_headers(connection, module, identity, identity_not
# headers are not included since most API consumers would interpret absence as false.
current = False
- if arg_dict is not None and 'include_headers' in arg_dict:
- required = arg_dict['include_headers']
+ if arg_dict is not None and "include_headers" in arg_dict:
+ required = arg_dict["include_headers"]
else:
required = False
if current != required:
try:
if not module.check_mode:
- connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required,
- aws_retry=True)
+ connection.set_identity_headers_in_notifications_enabled(
+ Identity=identity, NotificationType=notification_type, Enabled=required, aws_retry=True
+ )
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format(
- identity=identity,
- notification_type=notification_type,
- ))
+ module.fail_json_aws(
+ e, msg=f"Failed to set identity headers in notification for {identity} {notification_type}"
+ )
return True
return False
@@ -368,51 +383,55 @@ def update_feedback_forwarding(connection, module, identity, identity_notificati
# are being handled by SNS topics. So in the absence of identity_notifications
    # information, existing feedback forwarding must be on.
current = True
- elif 'ForwardingEnabled' in identity_notifications:
- current = identity_notifications['ForwardingEnabled']
+ elif "ForwardingEnabled" in identity_notifications:
+ current = identity_notifications["ForwardingEnabled"]
else:
# If there is information on the notifications setup but no information on the
# forwarding state it's pretty safe to assume forwarding is off. AWS API docs
# suggest this information will always be included but best to be defensive
current = False
- required = module.params.get('feedback_forwarding')
+ required = module.params.get("feedback_forwarding")
if current != required:
try:
if not module.check_mode:
- connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True)
+ connection.set_identity_feedback_forwarding_enabled(
+ Identity=identity, ForwardingEnabled=required, aws_retry=True
+ )
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity))
+ module.fail_json_aws(e, msg=f"Failed to set identity feedback forwarding for {identity}")
return True
return False
def create_mock_notifications_response(module):
resp = {
- "ForwardingEnabled": module.params.get('feedback_forwarding'),
+ "ForwardingEnabled": module.params.get("feedback_forwarding"),
}
- for notification_type in ('Bounce', 'Complaint', 'Delivery'):
- arg_dict = module.params.get(notification_type.lower() + '_notifications')
- if arg_dict is not None and 'topic' in arg_dict:
- resp[notification_type + 'Topic'] = arg_dict['topic']
-
- header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
- if arg_dict is not None and 'include_headers' in arg_dict:
- resp[header_key] = arg_dict['include_headers']
+ for notification_type in ("Bounce", "Complaint", "Delivery"):
+ arg_dict = module.params.get(notification_type.lower() + "_notifications")
+ if arg_dict is not None and "topic" in arg_dict:
+ resp[notification_type + "Topic"] = arg_dict["topic"]
+
+ header_key = "HeadersIn" + notification_type + "NotificationsEnabled"
+ if arg_dict is not None and "include_headers" in arg_dict:
+ resp[header_key] = arg_dict["include_headers"]
else:
resp[header_key] = False
return resp
def update_identity_notifications(connection, module):
- identity = module.params.get('identity')
+ identity = module.params.get("identity")
changed = False
identity_notifications = get_identity_notifications(connection, module, identity)
- for notification_type in ('Bounce', 'Complaint', 'Delivery'):
+ for notification_type in ("Bounce", "Complaint", "Delivery"):
changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type)
- changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type)
+ changed |= update_notification_topic_headers(
+ connection, module, identity, identity_notifications, notification_type
+ )
changed |= update_feedback_forwarding(connection, module, identity, identity_notifications)
@@ -425,25 +444,29 @@ def update_identity_notifications(connection, module):
def validate_params_for_identity_present(module):
- if module.params.get('feedback_forwarding') is False:
- if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')):
- module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires "
- "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics")
+ if module.params.get("feedback_forwarding") is False:
+ if not (desired_topic(module, "Bounce") and desired_topic(module, "Complaint")):
+ module.fail_json(
+ msg=(
+ "Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires "
+ "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics"
+ )
+ )
def create_or_update_identity(connection, module, region, account_id):
- identity = module.params.get('identity')
+ identity = module.params.get("identity")
changed = False
verification_attributes = get_verification_attributes(connection, module, identity)
if verification_attributes is None:
try:
if not module.check_mode:
- if '@' in identity:
+ if "@" in identity:
connection.verify_email_identity(EmailAddress=identity, aws_retry=True)
else:
connection.verify_domain_identity(Domain=identity, aws_retry=True)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity))
+ module.fail_json_aws(e, msg=f"Failed to verify identity {identity}")
if module.check_mode:
verification_attributes = {
"VerificationStatus": "Pending",
@@ -451,20 +474,22 @@ def create_or_update_identity(connection, module, region, account_id):
else:
verification_attributes = get_verification_attributes(connection, module, identity, retries=4)
changed = True
- elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'):
- module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'],
- verification_attributes=camel_dict_to_snake_dict(verification_attributes))
+ elif verification_attributes["VerificationStatus"] not in ("Pending", "Success"):
+ module.fail_json(
+ msg="Identity " + identity + " in bad status " + verification_attributes["VerificationStatus"],
+ verification_attributes=camel_dict_to_snake_dict(verification_attributes),
+ )
if verification_attributes is None:
- module.fail_json(msg='Unable to load identity verification attributes after registering identity.')
+ module.fail_json(msg="Unable to load identity verification attributes after registering identity.")
notifications_changed, notification_attributes = update_identity_notifications(connection, module)
changed |= notifications_changed
if notification_attributes is None:
- module.fail_json(msg='Unable to load identity notification attributes.')
+ module.fail_json(msg="Unable to load identity notification attributes.")
- identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity
+ identity_arn = "arn:aws:ses:" + region + ":" + account_id + ":identity/" + identity
module.exit_json(
changed=changed,
@@ -476,7 +501,7 @@ def create_or_update_identity(connection, module, region, account_id):
def destroy_identity(connection, module):
- identity = module.params.get('identity')
+ identity = module.params.get("identity")
changed = False
verification_attributes = get_verification_attributes(connection, module, identity)
if verification_attributes is not None:
@@ -484,7 +509,7 @@ def destroy_identity(connection, module):
if not module.check_mode:
connection.delete_identity(Identity=identity, aws_retry=True)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity))
+ module.fail_json_aws(e, msg=f"Failed to delete identity {identity}")
changed = True
module.exit_json(
@@ -494,44 +519,50 @@ def destroy_identity(connection, module):
def get_account_id(module):
- sts = module.client('sts')
+ sts = module.client("sts")
try:
caller_identity = sts.get_caller_identity()
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to retrieve caller identity')
- return caller_identity['Account']
+ module.fail_json_aws(e, msg="Failed to retrieve caller identity")
+ return caller_identity["Account"]
def main():
module = AnsibleAWSModule(
argument_spec={
- "identity": dict(required=True, type='str'),
- "state": dict(default='present', choices=['present', 'absent']),
- "bounce_notifications": dict(type='dict'),
- "complaint_notifications": dict(type='dict'),
- "delivery_notifications": dict(type='dict'),
- "feedback_forwarding": dict(default=True, type='bool'),
+ "identity": dict(required=True, type="str"),
+ "state": dict(default="present", choices=["present", "absent"]),
+ "bounce_notifications": dict(type="dict"),
+ "complaint_notifications": dict(type="dict"),
+ "delivery_notifications": dict(type="dict"),
+ "feedback_forwarding": dict(default=True, type="bool"),
},
supports_check_mode=True,
)
- for notification_type in ('bounce', 'complaint', 'delivery'):
- param_name = notification_type + '_notifications'
+ for notification_type in ("bounce", "complaint", "delivery"):
+ param_name = notification_type + "_notifications"
arg_dict = module.params.get(param_name)
if arg_dict:
- extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
+ extra_keys = [x for x in arg_dict.keys() if x not in ("topic", "include_headers")]
if extra_keys:
- module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')
+ module.fail_json(
+ msg="Unexpected keys "
+ + str(extra_keys)
+ + " in "
+ + param_name
+ + " valid keys are topic or include_headers"
+ )
# SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
# Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
    # failures, so apply a jittered backoff to all SES calls.
- connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
+ connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff())
state = module.params.get("state")
- if state == 'present':
+ if state == "present":
region = module.region
account_id = get_account_id(module)
validate_params_for_identity_present(module)
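
The jittered backoff noted in the comment above can also be applied directly as a decorator rather than via module.client's retry_decorator. A hedged sketch, assuming AWSRetry.jittered_backoff accepts retries/delay keywords as in amazon.aws:

# Hedged sketch: the same backoff applied as a plain decorator. The
# keyword names (retries, delay) are assumed from amazon.aws's AWSRetry.
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry

@AWSRetry.jittered_backoff(retries=5, delay=1)
def list_identities(client):
    # Throttled calls are retried with exponential backoff plus random
    # jitter, spreading parallel CI runs off the documented 1 req/s limit.
    return client.list_identities()
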
@@ -540,5 +571,5 @@ def main():
destroy_identity(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py b/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py
index 16d9f1ded..9b7a3d6b6 100644
--- a/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py
+++ b/ansible_collections/community/aws/plugins/modules/ses_identity_policy.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ses_identity_policy
version_added: 1.0.0
@@ -41,12 +39,12 @@ options:
choices: [ 'present', 'absent' ]
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: add sending authorization policy to domain identity
@@ -75,42 +73,45 @@ EXAMPLES = '''
identity: example.com
policy_name: ExamplePolicy
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
policies:
description: A list of all policies present on the identity after the operation.
returned: success
type: list
sample: [ExamplePolicy]
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry
+"""
import json
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def get_identity_policy(connection, module, identity, policy_name):
try:
response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name))
- policies = response['Policies']
+ module.fail_json_aws(e, msg=f"Failed to retrieve identity policy {policy_name}")
+ policies = response["Policies"]
if policy_name in policies:
return policies[policy_name]
return None
def create_or_update_identity_policy(connection, module):
- identity = module.params.get('identity')
- policy_name = module.params.get('policy_name')
- required_policy = module.params.get('policy')
+ identity = module.params.get("identity")
+ policy_name = module.params.get("policy_name")
+ required_policy = module.params.get("policy")
required_policy_dict = json.loads(required_policy)
changed = False
@@ -120,9 +121,11 @@ def create_or_update_identity_policy(connection, module):
changed = True
try:
if not module.check_mode:
- connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True)
+ connection.put_identity_policy(
+ Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True
+ )
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name))
+ module.fail_json_aws(e, msg=f"Failed to put identity policy {policy_name}")
# Load the list of applied policies to include in the response.
# In principle we should be able to just return the response, but given
@@ -133,9 +136,9 @@ def create_or_update_identity_policy(connection, module):
#
# As a nice side benefit this also means the return is correct in check mode
try:
- policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
+ policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"]
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to list identity policies')
+ module.fail_json_aws(e, msg="Failed to list identity policies")
if policy_name is not None and policy_name not in policies_present:
policies_present = list(policies_present)
policies_present.append(policy_name)
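
The re-listing above keeps the module's return value accurate even in check mode, where the put never reached AWS. A sketch of that check-mode-safe pattern (the function name is illustrative):

# Sketch: list the real server-side state, then splice in the change
# that check mode skipped, so the return is consistent either way.
def policies_after_put(connection, identity, policy_name):
    present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"]
    if policy_name not in present:
        # In check mode the put never happened; reflect it manually.
        present = list(present) + [policy_name]
    return present
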
@@ -146,20 +149,20 @@ def create_or_update_identity_policy(connection, module):
def delete_identity_policy(connection, module):
- identity = module.params.get('identity')
- policy_name = module.params.get('policy_name')
+ identity = module.params.get("identity")
+ policy_name = module.params.get("policy_name")
changed = False
try:
- policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
+ policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)["PolicyNames"]
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to list identity policies')
+ module.fail_json_aws(e, msg="Failed to list identity policies")
if policy_name in policies_present:
try:
if not module.check_mode:
connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name))
+ module.fail_json_aws(e, msg=f"Failed to delete identity policy {policy_name}")
changed = True
policies_present = list(policies_present)
policies_present.remove(policy_name)
@@ -173,12 +176,12 @@ def delete_identity_policy(connection, module):
def main():
module = AnsibleAWSModule(
argument_spec={
- 'identity': dict(required=True, type='str'),
- 'state': dict(default='present', choices=['present', 'absent']),
- 'policy_name': dict(required=True, type='str'),
- 'policy': dict(type='json', default=None),
+ "identity": dict(required=True, type="str"),
+ "state": dict(default="present", choices=["present", "absent"]),
+ "policy_name": dict(required=True, type="str"),
+ "policy": dict(type="json", default=None),
},
- required_if=[['state', 'present', ['policy']]],
+ required_if=[["state", "present", ["policy"]]],
supports_check_mode=True,
)
@@ -186,15 +189,15 @@ def main():
# Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
    # failures, so apply a jittered backoff to all SES calls.
- connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
+ connection = module.client("ses", retry_decorator=AWSRetry.jittered_backoff())
state = module.params.get("state")
- if state == 'present':
+ if state == "present":
create_or_update_identity_policy(connection, module)
else:
delete_identity_policy(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/ses_rule_set.py b/ansible_collections/community/aws/plugins/modules/ses_rule_set.py
index b42ac8088..cf478c0f9 100644
--- a/ansible_collections/community/aws/plugins/modules/ses_rule_set.py
+++ b/ansible_collections/community/aws/plugins/modules/ses_rule_set.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017, Ben Tomasik <ben@tomasik.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ses_rule_set
version_added: 1.0.0
@@ -46,15 +44,14 @@ options:
required: False
default: False
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
----
- name: Create default rule set and activate it if not already
community.aws.ses_rule_set:
name: default-rule-set
@@ -84,7 +81,7 @@ EXAMPLES = """
force: true
"""
-RETURN = """
+RETURN = r"""
active:
description: if the SES rule set is active
returned: success if I(state) is C(present)
@@ -100,25 +97,29 @@ rule_sets:
}]
"""
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
-
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # handled by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def list_rule_sets(client, module):
try:
response = client.list_receipt_rule_sets(aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Couldn't list rule sets.")
- return response['RuleSets']
+ return response["RuleSets"]
def rule_set_in(name, rule_sets):
- return any(s for s in rule_sets if s['Name'] == name)
+ return any(s for s in rule_sets if s["Name"] == name)
def ruleset_active(client, module, name):
@@ -126,8 +127,8 @@ def ruleset_active(client, module, name):
active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Couldn't get the active rule set.")
- if active_rule_set is not None and 'Metadata' in active_rule_set:
- return name == active_rule_set['Metadata']['Name']
+ if active_rule_set is not None and "Metadata" in active_rule_set:
+ return name == active_rule_set["Metadata"]["Name"]
else:
# Metadata was not set meaning there is no active rule set
return False
@@ -153,7 +154,7 @@ def update_active_rule_set(client, module, name, desired_active):
try:
client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name))
+ module.fail_json_aws(e, msg=f"Couldn't set active rule set to {name}.")
changed = True
active = True
elif not desired_active and active:
@@ -165,7 +166,7 @@ def update_active_rule_set(client, module, name, desired_active):
def create_or_update_rule_set(client, module):
- name = module.params.get('name')
+ name = module.params.get("name")
check_mode = module.check_mode
changed = False
@@ -175,14 +176,16 @@ def create_or_update_rule_set(client, module):
try:
client.create_receipt_rule_set(RuleSetName=name, aws_retry=True)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name))
+ module.fail_json_aws(e, msg=f"Couldn't create rule set {name}.")
changed = True
rule_sets = list(rule_sets)
- rule_sets.append({
- 'Name': name,
- })
+ rule_sets.append(
+ {
+ "Name": name,
+ }
+ )
- (active_changed, active) = update_active_rule_set(client, module, name, module.params.get('active'))
+ (active_changed, active) = update_active_rule_set(client, module, name, module.params.get("active"))
changed |= active_changed
module.exit_json(
@@ -193,30 +196,33 @@ def create_or_update_rule_set(client, module):
def remove_rule_set(client, module):
- name = module.params.get('name')
+ name = module.params.get("name")
check_mode = module.check_mode
changed = False
rule_sets = list_rule_sets(client, module)
if rule_set_in(name, rule_sets):
active = ruleset_active(client, module, name)
- if active and not module.params.get('force'):
+ if active and not module.params.get("force"):
module.fail_json(
- msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format(name),
+ msg=(
+ f"Couldn't delete rule set {name} because it is currently active. Set force=true to delete an"
+ " active ruleset."
+ ),
error={
"code": "CannotDelete",
- "message": "Cannot delete active rule set: {0}".format(name),
- }
+ "message": f"Cannot delete active rule set: {name}",
+ },
)
if not check_mode:
- if active and module.params.get('force'):
+ if active and module.params.get("force"):
deactivate_rule_set(client, module)
try:
client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name))
+ module.fail_json_aws(e, msg=f"Couldn't delete rule set {name}.")
changed = True
- rule_sets = [x for x in rule_sets if x['Name'] != name]
+ rule_sets = [x for x in rule_sets if x["Name"] != name]
module.exit_json(
changed=changed,
@@ -226,27 +232,27 @@ def remove_rule_set(client, module):
def main():
argument_spec = dict(
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- active=dict(type='bool'),
- force=dict(type='bool', default=False),
+ name=dict(type="str", required=True),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ active=dict(type="bool"),
+ force=dict(type="bool", default=False),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- state = module.params.get('state')
+ state = module.params.get("state")
# SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
# Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
    # failures, so apply a jittered backoff to all SES calls.
- client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
+ client = module.client("ses", retry_decorator=AWSRetry.jittered_backoff())
- if state == 'absent':
+ if state == "absent":
remove_rule_set(client, module)
else:
create_or_update_rule_set(client, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/sns.py b/ansible_collections/community/aws/plugins/modules/sns.py
index f72bbfa49..62c440c1f 100644
--- a/ansible_collections/community/aws/plugins/modules/sns.py
+++ b/ansible_collections/community/aws/plugins/modules/sns.py
@@ -4,11 +4,7 @@
# Copyright: (c) 2014, Michael J. Schultz <mjschultz@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: sns
short_description: Send Amazon Simple Notification Service messages
version_added: 1.0.0
@@ -96,12 +92,12 @@ options:
version_added: 5.4.0
extends_documentation_fragment:
-- amazon.aws.ec2
-- amazon.aws.aws
-- amazon.aws.boto3
-'''
+ - amazon.aws.region.modules
+ - amazon.aws.common.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Send default notification message via SNS
community.aws.sns:
msg: '{{ inventory_hostname }} has completed the play.'
@@ -139,7 +135,7 @@ EXAMPLES = """
delegate_to: localhost
"""
-RETURN = """
+RETURN = r"""
msg:
description: Human-readable diagnostic information
returned: always
@@ -159,32 +155,33 @@ sequence_number:
import json
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
- pass # Handled by AnsibleAWSModule
+ pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup
def main():
protocols = [
- 'http',
- 'https',
- 'email',
- 'email_json',
- 'sms',
- 'sqs',
- 'application',
- 'lambda',
+ "http",
+ "https",
+ "email",
+ "email_json",
+ "sms",
+ "sqs",
+ "application",
+ "lambda",
]
argument_spec = dict(
- msg=dict(required=True, aliases=['default']),
+ msg=dict(required=True, aliases=["default"]),
subject=dict(),
topic=dict(required=True),
- message_attributes=dict(type='dict'),
- message_structure=dict(choices=['json', 'string'], default='json'),
+ message_attributes=dict(type="dict"),
+ message_structure=dict(choices=["json", "string"], default="json"),
message_group_id=dict(),
message_deduplication_id=dict(),
)
@@ -195,50 +192,48 @@ def main():
module = AnsibleAWSModule(argument_spec=argument_spec)
sns_kwargs = dict(
- Message=module.params['msg'],
- Subject=module.params['subject'],
- MessageStructure=module.params['message_structure'],
+ Message=module.params["msg"],
+ Subject=module.params["subject"],
+ MessageStructure=module.params["message_structure"],
)
- if module.params['message_attributes']:
- if module.params['message_structure'] != 'string':
+ if module.params["message_attributes"]:
+ if module.params["message_structure"] != "string":
module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
- sns_kwargs['MessageAttributes'] = module.params['message_attributes']
+ sns_kwargs["MessageAttributes"] = module.params["message_attributes"]
if module.params["message_group_id"]:
sns_kwargs["MessageGroupId"] = module.params["message_group_id"]
if module.params["message_deduplication_id"]:
sns_kwargs["MessageDeduplicationId"] = module.params["message_deduplication_id"]
- dict_msg = {
- 'default': sns_kwargs['Message']
- }
+ dict_msg = {"default": sns_kwargs["Message"]}
for p in protocols:
if module.params[p]:
- if sns_kwargs['MessageStructure'] != 'json':
+ if sns_kwargs["MessageStructure"] != "json":
module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
- dict_msg[p.replace('_', '-')] = module.params[p]
+ dict_msg[p.replace("_", "-")] = module.params[p]
- client = module.client('sns')
+ client = module.client("sns")
- topic = module.params['topic']
- if ':' in topic:
+ topic = module.params["topic"]
+ if ":" in topic:
# Short names can't contain ':' so we'll assume this is the full ARN
- sns_kwargs['TopicArn'] = topic
+ sns_kwargs["TopicArn"] = topic
else:
- sns_kwargs['TopicArn'] = topic_arn_lookup(client, module, topic)
+ sns_kwargs["TopicArn"] = topic_arn_lookup(client, module, topic)
- if not sns_kwargs['TopicArn']:
- module.fail_json(msg='Could not find topic: {0}'.format(topic))
+ if not sns_kwargs["TopicArn"]:
+ module.fail_json(msg=f"Could not find topic: {topic}")
- if sns_kwargs['MessageStructure'] == 'json':
- sns_kwargs['Message'] = json.dumps(dict_msg)
+ if sns_kwargs["MessageStructure"] == "json":
+ sns_kwargs["Message"] = json.dumps(dict_msg)
try:
result = client.publish(**sns_kwargs)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to publish message')
+ module.fail_json_aws(e, msg="Failed to publish message")
sns_result = dict(msg="OK", message_id=result["MessageId"])
@@ -248,5 +243,5 @@ def main():
module.exit_json(**sns_result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/sns_topic.py b/ansible_collections/community/aws/plugins/modules/sns_topic.py
index 3c05be004..0fe7fbe33 100644
--- a/ansible_collections/community/aws/plugins/modules/sns_topic.py
+++ b/ansible_collections/community/aws/plugins/modules/sns_topic.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: sns_topic
short_description: Manages AWS SNS topics and subscriptions
version_added: 1.0.0
@@ -159,11 +156,11 @@ options:
notes:
- Support for I(tags) and I(purge_tags) was added in release 5.3.0.
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.tags
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags.modules
- amazon.aws.boto3
-'''
+"""
EXAMPLES = r"""
@@ -182,7 +179,7 @@ EXAMPLES = r"""
numMinDelayRetries: 2
numNoDelayRetries: 2
backoffFunction: "linear"
- disableSubscriptionOverrides: True
+ disableSubscriptionOverrides: true
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
@@ -216,7 +213,7 @@ EXAMPLES = r"""
state: absent
"""
-RETURN = r'''
+RETURN = r"""
sns_arn:
description: The ARN of the topic you are modifying
type: str
@@ -332,7 +329,7 @@ sns_topic:
returned: always
type: bool
sample: false
-'''
+"""
import json
@@ -341,38 +338,41 @@ try:
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.community.aws.plugins.module_utils.sns import list_topics
-from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup
-from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies
-from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions
+from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.sns import canonicalize_endpoint
+from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies
from ansible_collections.community.aws.plugins.module_utils.sns import get_info
+from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions
+from ansible_collections.community.aws.plugins.module_utils.sns import list_topics
+from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup
from ansible_collections.community.aws.plugins.module_utils.sns import update_tags
class SnsTopicManager(object):
- """ Handles SNS Topic creation and destruction """
-
- def __init__(self,
- module,
- name,
- topic_type,
- state,
- display_name,
- policy,
- delivery_policy,
- subscriptions,
- purge_subscriptions,
- tags,
- purge_tags,
- content_based_deduplication,
- check_mode):
-
- self.connection = module.client('sns')
+ """Handles SNS Topic creation and destruction"""
+
+ def __init__(
+ self,
+ module,
+ name,
+ topic_type,
+ state,
+ display_name,
+ policy,
+ delivery_policy,
+ subscriptions,
+ purge_subscriptions,
+ tags,
+ purge_tags,
+ content_based_deduplication,
+ check_mode,
+ ):
+ self.connection = module.client("sns")
self.module = module
self.name = name
self.topic_type = topic_type
@@ -402,73 +402,80 @@ class SnsTopicManager(object):
# NOTE: Never set FifoTopic = False. Some regions (including GovCloud)
# don't support the attribute being set, even to False.
- if self.topic_type == 'fifo':
- attributes['FifoTopic'] = 'true'
- if not self.name.endswith('.fifo'):
- self.name = self.name + '.fifo'
+ if self.topic_type == "fifo":
+ attributes["FifoTopic"] = "true"
+ if not self.name.endswith(".fifo"):
+ self.name = self.name + ".fifo"
if self.tags:
tags = ansible_dict_to_boto3_tag_list(self.tags)
if not self.check_mode:
try:
- response = self.connection.create_topic(Name=self.name,
- Attributes=attributes,
- Tags=tags)
+ response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name)
- self.topic_arn = response['TopicArn']
+ self.module.fail_json_aws(e, msg=f"Couldn't create topic {self.name}")
+ self.topic_arn = response["TopicArn"]
return True
def _set_topic_attrs(self):
changed = False
try:
- topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']
+ topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)["Attributes"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn)
+ self.module.fail_json_aws(e, msg=f"Couldn't get topic attributes for topic {self.topic_arn}")
- if self.display_name and self.display_name != topic_attributes['DisplayName']:
+ if self.display_name and self.display_name != topic_attributes["DisplayName"]:
changed = True
- self.attributes_set.append('display_name')
+ self.attributes_set.append("display_name")
if not self.check_mode:
try:
- self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName',
- AttributeValue=self.display_name)
+ self.connection.set_topic_attributes(
+ TopicArn=self.topic_arn, AttributeName="DisplayName", AttributeValue=self.display_name
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set display name")
- if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])):
+ if self.policy and compare_policies(self.policy, json.loads(topic_attributes["Policy"])):
changed = True
- self.attributes_set.append('policy')
+ self.attributes_set.append("policy")
if not self.check_mode:
try:
- self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy',
- AttributeValue=json.dumps(self.policy))
+ self.connection.set_topic_attributes(
+ TopicArn=self.topic_arn, AttributeName="Policy", AttributeValue=json.dumps(self.policy)
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set topic policy")
# Set content-based deduplication attribute. Ignore if topic_type is not fifo.
- if ("FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true") and \
- self.content_based_deduplication:
- enabled = "true" if self.content_based_deduplication in 'enabled' else "false"
- if enabled != topic_attributes['ContentBasedDeduplication']:
+ if (
+ "FifoTopic" in topic_attributes and topic_attributes["FifoTopic"] == "true"
+ ) and self.content_based_deduplication:
+ enabled = "true" if self.content_based_deduplication in "enabled" else "false"
+ if enabled != topic_attributes["ContentBasedDeduplication"]:
changed = True
- self.attributes_set.append('content_based_deduplication')
+ self.attributes_set.append("content_based_deduplication")
if not self.check_mode:
try:
- self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='ContentBasedDeduplication',
- AttributeValue=enabled)
+ self.connection.set_topic_attributes(
+ TopicArn=self.topic_arn, AttributeName="ContentBasedDeduplication", AttributeValue=enabled
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set content-based deduplication")
- if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
- compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))):
+ if self.delivery_policy and (
+ "DeliveryPolicy" not in topic_attributes
+ or compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes["DeliveryPolicy"]))
+ ):
changed = True
- self.attributes_set.append('delivery_policy')
+ self.attributes_set.append("delivery_policy")
if not self.check_mode:
try:
- self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy',
- AttributeValue=json.dumps(self.delivery_policy))
+ self.connection.set_topic_attributes(
+ TopicArn=self.topic_arn,
+ AttributeName="DeliveryPolicy",
+ AttributeValue=json.dumps(self.delivery_policy),
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy")
return changed
@@ -476,20 +483,23 @@ class SnsTopicManager(object):
def _set_topic_subs(self):
changed = False
subscriptions_existing_list = set()
- desired_subscriptions = [(sub['protocol'],
- canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
- self.subscriptions]
+ desired_subscriptions = [
+ (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"])) for sub in self.subscriptions
+ ]
for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn):
- sub_key = (sub['Protocol'], sub['Endpoint'])
+ sub_key = (sub["Protocol"], sub["Endpoint"])
subscriptions_existing_list.add(sub_key)
- if (self.purge_subscriptions and sub_key not in desired_subscriptions and
- sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')):
+ if (
+ self.purge_subscriptions
+ and sub_key not in desired_subscriptions
+ and sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted")
+ ):
changed = True
self.subscriptions_deleted.append(sub_key)
if not self.check_mode:
try:
- self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
+ self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
@@ -500,13 +510,13 @@ class SnsTopicManager(object):
try:
self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn)
+ self.module.fail_json_aws(e, msg=f"Couldn't subscribe to topic {self.topic_arn}")
return changed
def _init_desired_subscription_attributes(self):
for sub in self.subscriptions:
- sub_key = (sub['protocol'], canonicalize_endpoint(sub['protocol'], sub['endpoint']))
- tmp_dict = sub.get('attributes', {})
+ sub_key = (sub["protocol"], canonicalize_endpoint(sub["protocol"], sub["endpoint"]))
+ tmp_dict = sub.get("attributes", {})
# aws sdk expects values to be strings
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#SNS.Client.set_subscription_attributes
for k, v in tmp_dict.items():
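
The body of the loop above is elided by the hunk, but per the comment it presumably coerces each attribute value to the string form the SDK expects. A hedged sketch of such a normalisation (an illustration, not the module's exact code):

# Sketch: SetSubscriptionAttributes only takes string values, so
# playbook booleans and numbers are normalised before comparison.
def stringify_attributes(attributes):
    out = {}
    for key, value in attributes.items():
        if isinstance(value, bool):
            # boto3 expects "true"/"false", not Python's "True"/"False".
            out[key] = str(value).lower()
        else:
            out[key] = str(value)
    return out
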
@@ -517,26 +527,28 @@ class SnsTopicManager(object):
def _set_topic_subs_attributes(self):
changed = False
for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn):
- sub_key = (sub['Protocol'], sub['Endpoint'])
- sub_arn = sub['SubscriptionArn']
+ sub_key = (sub["Protocol"], sub["Endpoint"])
+ sub_arn = sub["SubscriptionArn"]
if not self.desired_subscription_attributes.get(sub_key):
# subscription attributes aren't defined in desired, skipping
continue
try:
- sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)['Attributes']
+ sub_current_attributes = self.connection.get_subscription_attributes(SubscriptionArn=sub_arn)[
+ "Attributes"
+ ]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, "Couldn't get subscription attributes for subscription %s" % sub_arn)
+ self.module.fail_json_aws(e, f"Couldn't get subscription attributes for subscription {sub_arn}")
- raw_message = self.desired_subscription_attributes[sub_key].get('RawMessageDelivery')
- if raw_message is not None and 'RawMessageDelivery' in sub_current_attributes:
- if sub_current_attributes['RawMessageDelivery'].lower() != raw_message.lower():
+ raw_message = self.desired_subscription_attributes[sub_key].get("RawMessageDelivery")
+ if raw_message is not None and "RawMessageDelivery" in sub_current_attributes:
+ if sub_current_attributes["RawMessageDelivery"].lower() != raw_message.lower():
changed = True
if not self.check_mode:
try:
- self.connection.set_subscription_attributes(SubscriptionArn=sub_arn,
- AttributeName='RawMessageDelivery',
- AttributeValue=raw_message)
+ self.connection.set_subscription_attributes(
+ SubscriptionArn=sub_arn, AttributeName="RawMessageDelivery", AttributeValue=raw_message
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, "Couldn't set RawMessageDelivery subscription attribute")
@@ -549,11 +561,11 @@ class SnsTopicManager(object):
if not subscriptions:
return False
for sub in subscriptions:
- if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
- self.subscriptions_deleted.append(sub['SubscriptionArn'])
+ if sub["SubscriptionArn"] not in ("PendingConfirmation", "Deleted"):
+ self.subscriptions_deleted.append(sub["SubscriptionArn"])
if not self.check_mode:
try:
- self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
+ self.connection.unsubscribe(SubscriptionArn=sub["SubscriptionArn"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
return True
@@ -564,11 +576,11 @@ class SnsTopicManager(object):
try:
self.connection.delete_topic(TopicArn=self.topic_arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn)
+ self.module.fail_json_aws(e, msg=f"Couldn't delete topic {self.topic_arn}")
return True
def _name_is_arn(self):
- return self.name.startswith('arn:')
+ return bool(parse_aws_arn(self.name))
def ensure_ok(self):
changed = False
@@ -578,7 +590,9 @@ class SnsTopicManager(object):
if self.topic_arn in list_topics(self.connection, self.module):
changed |= self._set_topic_attrs()
elif self.display_name or self.policy or self.delivery_policy:
- self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account")
+ self.module.fail_json(
+ msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account"
+ )
changed |= self._set_topic_subs()
self._init_desired_subscription_attributes()
if self.topic_arn in list_topics(self.connection, self.module):
@@ -595,7 +609,9 @@ class SnsTopicManager(object):
self.populate_topic_arn()
if self.topic_arn:
if self.topic_arn not in list_topics(self.connection, self.module):
- self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe")
+ self.module.fail_json(
+ msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe"
+ )
changed = self._delete_subscriptions()
changed |= self._delete_topic()
return changed
@@ -606,7 +622,7 @@ class SnsTopicManager(object):
return
name = self.name
- if self.topic_type == 'fifo' and not name.endswith('.fifo'):
+ if self.topic_type == "fifo" and not name.endswith(".fifo"):
name += ".fifo"
self.topic_arn = topic_arn_lookup(self.connection, self.module, name)
@@ -615,83 +631,87 @@ def main():
# We're kinda stuck with CamelCase here, it would be nice to switch to
# snake_case, but we'd need to purge out the alias entries
http_retry_args = dict(
- minDelayTarget=dict(type='int', required=True),
- maxDelayTarget=dict(type='int', required=True),
- numRetries=dict(type='int', required=True),
- numMaxDelayRetries=dict(type='int', required=True),
- numMinDelayRetries=dict(type='int', required=True),
- numNoDelayRetries=dict(type='int', required=True),
- backoffFunction=dict(type='str', required=True, choices=['arithmetic', 'exponential', 'geometric', 'linear']),
+ minDelayTarget=dict(type="int", required=True),
+ maxDelayTarget=dict(type="int", required=True),
+ numRetries=dict(type="int", required=True),
+ numMaxDelayRetries=dict(type="int", required=True),
+ numMinDelayRetries=dict(type="int", required=True),
+ numNoDelayRetries=dict(type="int", required=True),
+ backoffFunction=dict(type="str", required=True, choices=["arithmetic", "exponential", "geometric", "linear"]),
)
http_delivery_args = dict(
- defaultHealthyRetryPolicy=dict(type='dict', required=True, options=http_retry_args),
- disableSubscriptionOverrides=dict(type='bool', required=False),
+ defaultHealthyRetryPolicy=dict(type="dict", required=True, options=http_retry_args),
+ disableSubscriptionOverrides=dict(type="bool", required=False),
defaultThrottlePolicy=dict(
- type='dict', required=False,
+ type="dict",
+ required=False,
options=dict(
- maxReceivesPerSecond=dict(type='int', required=True),
+ maxReceivesPerSecond=dict(type="int", required=True),
),
),
)
delivery_args = dict(
- http=dict(type='dict', required=False, options=http_delivery_args),
+ http=dict(type="dict", required=False, options=http_delivery_args),
)
argument_spec = dict(
name=dict(required=True),
- topic_type=dict(type='str', default='standard', choices=['standard', 'fifo']),
- state=dict(default='present', choices=['present', 'absent']),
+ topic_type=dict(type="str", default="standard", choices=["standard", "fifo"]),
+ state=dict(default="present", choices=["present", "absent"]),
display_name=dict(),
- policy=dict(type='dict'),
- delivery_policy=dict(type='dict', options=delivery_args),
- subscriptions=dict(default=[], type='list', elements='dict'),
- purge_subscriptions=dict(type='bool', default=True),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
- content_based_deduplication=dict(choices=['enabled', 'disabled'])
+ policy=dict(type="dict"),
+ delivery_policy=dict(type="dict", options=delivery_args),
+ subscriptions=dict(default=[], type="list", elements="dict"),
+ purge_subscriptions=dict(type="bool", default=True),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ content_based_deduplication=dict(choices=["enabled", "disabled"]),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True)
-
- name = module.params.get('name')
- topic_type = module.params.get('topic_type')
- state = module.params.get('state')
- display_name = module.params.get('display_name')
- policy = module.params.get('policy')
- delivery_policy = module.params.get('delivery_policy')
- subscriptions = module.params.get('subscriptions')
- purge_subscriptions = module.params.get('purge_subscriptions')
- content_based_deduplication = module.params.get('content_based_deduplication')
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ name = module.params.get("name")
+ topic_type = module.params.get("topic_type")
+ state = module.params.get("state")
+ display_name = module.params.get("display_name")
+ policy = module.params.get("policy")
+ delivery_policy = module.params.get("delivery_policy")
+ subscriptions = module.params.get("subscriptions")
+ purge_subscriptions = module.params.get("purge_subscriptions")
+ content_based_deduplication = module.params.get("content_based_deduplication")
check_mode = module.check_mode
- tags = module.params.get('tags')
- purge_tags = module.params.get('purge_tags')
-
- sns_topic = SnsTopicManager(module,
- name,
- topic_type,
- state,
- display_name,
- policy,
- delivery_policy,
- subscriptions,
- purge_subscriptions,
- tags,
- purge_tags,
- content_based_deduplication,
- check_mode)
-
- if state == 'present':
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+
+ sns_topic = SnsTopicManager(
+ module,
+ name,
+ topic_type,
+ state,
+ display_name,
+ policy,
+ delivery_policy,
+ subscriptions,
+ purge_subscriptions,
+ tags,
+ purge_tags,
+ content_based_deduplication,
+ check_mode,
+ )
+
+ if state == "present":
changed = sns_topic.ensure_ok()
- elif state == 'absent':
+ elif state == "absent":
changed = sns_topic.ensure_gone()
- sns_facts = dict(changed=changed,
- sns_arn=sns_topic.topic_arn,
- sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn))
+ sns_facts = dict(
+ changed=changed,
+ sns_arn=sns_topic.topic_arn,
+ sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn),
+ )
module.exit_json(**sns_facts)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
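
The _name_is_arn() hunk above swaps a bare startswith('arn:') test for
parse_aws_arn() from amazon.aws, which rejects strings that merely begin with
the prefix. A minimal standalone approximation of the stricter check, assuming
the usual arn:partition:service:region:account:resource layout (the real
helper lives in amazon.aws.plugins.module_utils.arn and parses the fields into
a dict rather than answering yes/no):

    import re

    # Five colon-separated fields after the "arn:" prefix; region and
    # account may be empty, the resource part may not.
    ARN_RE = re.compile(
        r"^arn:(?P<partition>[^:]+):(?P<service>[^:]+):(?P<region>[^:]*):"
        r"(?P<account>[^:]*):(?P<resource>.+)$"
    )

    def looks_like_arn(name):
        # startswith("arn:") alone would accept strings such as "arn:banana";
        # requiring every field rejects them.
        return bool(ARN_RE.match(name))

    assert looks_like_arn("arn:aws:sns:us-east-1:123456789012:my-topic")
    assert not looks_like_arn("arn:banana")
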
diff --git a/ansible_collections/community/aws/plugins/modules/sns_topic_info.py b/ansible_collections/community/aws/plugins/modules/sns_topic_info.py
index ca6dd1aab..8cd712804 100644
--- a/ansible_collections/community/aws/plugins/modules/sns_topic_info.py
+++ b/ansible_collections/community/aws/plugins/modules/sns_topic_info.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: sns_topic_info
short_description: Get information about AWS SNS topics
version_added: 3.2.0
@@ -21,12 +18,12 @@ options:
required: false
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
+- amazon.aws.common.modules
+- amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: list all the topics
community.aws.sns_topic_info:
register: sns_topic_list
@@ -35,9 +32,9 @@ EXAMPLES = r'''
community.aws.sns_topic_info:
topic_arn: "{{ sns_arn }}"
register: sns_topic_info
-'''
+"""
-RETURN = r'''
+RETURN = r"""
result:
description:
- The result containing the details of one or all AWS SNS topics.
@@ -132,7 +129,7 @@ result:
description: The type of topic.
type: str
sample: "standard"
-'''
+"""
try:
@@ -140,26 +137,26 @@ try:
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.community.aws.plugins.module_utils.sns import list_topics
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.sns import get_info
+from ansible_collections.community.aws.plugins.module_utils.sns import list_topics
def main():
argument_spec = dict(
- topic_arn=dict(type='str', required=False),
+ topic_arn=dict(type="str", required=False),
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- topic_arn = module.params.get('topic_arn')
+ topic_arn = module.params.get("topic_arn")
try:
- connection = module.client('sns', retry_decorator=AWSRetry.jittered_backoff())
+ connection = module.client("sns", retry_decorator=AWSRetry.jittered_backoff())
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS.')
+ module.fail_json_aws(e, msg="Failed to connect to AWS.")
if topic_arn:
results = dict(sns_arn=topic_arn, sns_topic=get_info(connection, module, topic_arn))
@@ -169,5 +166,5 @@ def main():
module.exit_json(result=results)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
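
The retry_decorator handed to module.client() above wraps boto3 calls made
with aws_retry=True in jittered exponential backoff. A toy stand-in for that
behaviour, assuming any exception should be retried (the real
AWSRetry.jittered_backoff in amazon.aws.plugins.module_utils.retries only
retries known-transient botocore error codes):

    import random
    import time

    def jittered_backoff(retries=10, delay=3, max_delay=60):
        def decorator(func):
            def wrapper(*args, **kwargs):
                for attempt in range(retries):
                    try:
                        return func(*args, **kwargs)
                    except Exception:
                        if attempt == retries - 1:
                            raise
                        # Full jitter: sleep a random interval below an
                        # exponentially growing bound, capped at max_delay.
                        time.sleep(random.uniform(0, min(max_delay, delay * 2 ** attempt)))
            return wrapper
        return decorator
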
diff --git a/ansible_collections/community/aws/plugins/modules/sqs_queue.py b/ansible_collections/community/aws/plugins/modules/sqs_queue.py
index 211e64b26..ad3ce68a7 100644
--- a/ansible_collections/community/aws/plugins/modules/sqs_queue.py
+++ b/ansible_collections/community/aws/plugins/modules/sqs_queue.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: sqs_queue
version_added: 1.0.0
@@ -104,13 +102,13 @@ options:
- Enables content-based deduplication. Used for FIFOs only.
- Defaults to C(false).
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
-'''
+ - amazon.aws.boto3
+"""
-RETURN = r'''
+RETURN = r"""
content_based_deduplication:
description: Enables content-based deduplication. Used for FIFOs only.
type: bool
@@ -186,9 +184,9 @@ tags:
type: dict
returned: always
sample: '{"Env": "prod"}'
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create SQS queue with redrive policy
community.aws.sqs_queue:
name: my-queue
@@ -258,7 +256,7 @@ EXAMPLES = r'''
name: my-queue
region: ap-southeast-2
state: absent
-'''
+"""
import json
@@ -270,26 +268,27 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_queue_name(module, is_fifo=False):
- name = module.params.get('name')
- if not is_fifo or name.endswith('.fifo'):
+ name = module.params.get("name")
+ if not is_fifo or name.endswith(".fifo"):
return name
- return name + '.fifo'
+ return name + ".fifo"
# NonExistentQueue is explicitly expected when a queue doesn't exist
@AWSRetry.jittered_backoff()
def get_queue_url(client, name):
try:
- return client.get_queue_url(QueueName=name)['QueueUrl']
- except is_boto3_error_code('AWS.SimpleQueueService.NonExistentQueue'):
+ return client.get_queue_url(QueueName=name)["QueueUrl"]
+ except is_boto3_error_code("AWS.SimpleQueueService.NonExistentQueue"):
return None
@@ -297,13 +296,13 @@ def describe_queue(client, queue_url):
"""
Describe a queue and return its attributes in snake_case format
"""
- attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
+ attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)["Attributes"]
description = dict(attributes)
- description.pop('Policy', None)
- description.pop('RedrivePolicy', None)
+ description.pop("Policy", None)
+ description.pop("RedrivePolicy", None)
description = camel_dict_to_snake_dict(description)
- description['policy'] = attributes.get('Policy', None)
- description['redrive_policy'] = attributes.get('RedrivePolicy', None)
+ description["policy"] = attributes.get("Policy", None)
+ description["redrive_policy"] = attributes.get("RedrivePolicy", None)
# Boto3 returns everything as a string, convert them back to integers/dicts if
# that's what we expected.
@@ -311,12 +310,12 @@ def describe_queue(client, queue_url):
if value is None:
continue
- if key in ['policy', 'redrive_policy']:
+ if key in ["policy", "redrive_policy"]:
policy = json.loads(value)
description[key] = policy
continue
- if key == 'content_based_deduplication':
+ if key == "content_based_deduplication":
try:
description[key] = bool(value)
except (TypeError, ValueError):
@@ -332,49 +331,48 @@ def describe_queue(client, queue_url):
def create_or_update_sqs_queue(client, module):
- is_fifo = (module.params.get('queue_type') == 'fifo')
- kms_master_key_id = module.params.get('kms_master_key_id')
+ is_fifo = module.params.get("queue_type") == "fifo"
+ kms_master_key_id = module.params.get("kms_master_key_id")
queue_name = get_queue_name(module, is_fifo)
result = dict(
name=queue_name,
- region=module.params.get('region'),
+ region=module.params.get("region"),
changed=False,
)
queue_url = get_queue_url(client, queue_name)
- result['queue_url'] = queue_url
+ result["queue_url"] = queue_url
# Create a dict() to hold attributes that will be passed to boto3
create_attributes = {}
if not queue_url:
if is_fifo:
- create_attributes['FifoQueue'] = "True"
+ create_attributes["FifoQueue"] = "True"
if kms_master_key_id:
- create_attributes['KmsMasterKeyId'] = kms_master_key_id
- result['changed'] = True
+ create_attributes["KmsMasterKeyId"] = kms_master_key_id
+ result["changed"] = True
if module.check_mode:
return result
- queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)['QueueUrl']
+ queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)["QueueUrl"]
changed, arn = update_sqs_queue(module, client, queue_url)
- result['changed'] |= changed
- result['queue_arn'] = arn
+ result["changed"] |= changed
+ result["queue_arn"] = arn
changed, tags = update_tags(client, queue_url, module)
- result['changed'] |= changed
- result['tags'] = tags
+ result["changed"] |= changed
+ result["tags"] = tags
result.update(describe_queue(client, queue_url))
COMPATABILITY_KEYS = dict(
- delay_seconds='delivery_delay',
- receive_message_wait_time_seconds='receive_message_wait_time',
- visibility_timeout='default_visibility_timeout',
- kms_data_key_reuse_period_seconds='kms_data_key_reuse_period',
+ delay_seconds="delivery_delay",
+ receive_message_wait_time_seconds="receive_message_wait_time",
+ visibility_timeout="default_visibility_timeout",
+ kms_data_key_reuse_period_seconds="kms_data_key_reuse_period",
)
for key in list(result.keys()):
-
# The return values changed between boto and boto3, add the old keys too
# for backwards compatibility
return_name = COMPATABILITY_KEYS.get(key)
@@ -387,30 +385,32 @@ def create_or_update_sqs_queue(client, module):
def update_sqs_queue(module, client, queue_url):
check_mode = module.check_mode
changed = False
- existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
+ existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"], aws_retry=True)[
+ "Attributes"
+ ]
new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True)
attributes_to_set = dict()
# Boto3 SQS deals with policies as strings, we want to deal with them as
# dicts
- if module.params.get('policy') is not None:
- policy = module.params.get('policy')
- current_value = existing_attributes.get('Policy', '{}')
+ if module.params.get("policy") is not None:
+ policy = module.params.get("policy")
+ current_value = existing_attributes.get("Policy", "{}")
current_policy = json.loads(current_value)
if compare_policies(current_policy, policy):
- attributes_to_set['Policy'] = json.dumps(policy)
+ attributes_to_set["Policy"] = json.dumps(policy)
changed = True
- if module.params.get('redrive_policy') is not None:
- policy = module.params.get('redrive_policy')
- current_value = existing_attributes.get('RedrivePolicy', '{}')
+ if module.params.get("redrive_policy") is not None:
+ policy = module.params.get("redrive_policy")
+ current_value = existing_attributes.get("RedrivePolicy", "{}")
current_policy = json.loads(current_value)
if compare_policies(current_policy, policy):
- attributes_to_set['RedrivePolicy'] = json.dumps(policy)
+ attributes_to_set["RedrivePolicy"] = json.dumps(policy)
changed = True
for attribute, value in existing_attributes.items():
# We handle these as a special case because they're IAM policies
- if attribute in ['Policy', 'RedrivePolicy']:
+ if attribute in ["Policy", "RedrivePolicy"]:
continue
if attribute not in new_attributes.keys():
@@ -435,23 +435,19 @@ def update_sqs_queue(module, client, queue_url):
if changed and not check_mode:
client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True)
- return changed, existing_attributes.get('queue_arn')
+ return changed, existing_attributes.get("queue_arn")
def delete_sqs_queue(client, module):
- is_fifo = (module.params.get('queue_type') == 'fifo')
+ is_fifo = module.params.get("queue_type") == "fifo"
queue_name = get_queue_name(module, is_fifo)
- result = dict(
- name=queue_name,
- region=module.params.get('region'),
- changed=False
- )
+ result = dict(name=queue_name, region=module.params.get("region"), changed=False)
queue_url = get_queue_url(client, queue_name)
if not queue_url:
return result
- result['changed'] = bool(queue_url)
+ result["changed"] = bool(queue_url)
if not module.check_mode:
AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url)
@@ -459,13 +455,13 @@ def delete_sqs_queue(client, module):
def update_tags(client, queue_url, module):
- new_tags = module.params.get('tags')
- purge_tags = module.params.get('purge_tags')
+ new_tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
if new_tags is None:
return False, {}
try:
- existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags']
+ existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)["Tags"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError) as e:
existing_tags = {}
@@ -476,7 +472,7 @@ def update_tags(client, queue_url, module):
client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True)
if tags_to_add:
client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add)
- existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get('Tags', {})
+ existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get("Tags", {})
else:
existing_tags = new_tags
@@ -485,41 +481,40 @@ def update_tags(client, queue_url, module):
def main():
-
argument_spec = dict(
- state=dict(type='str', default='present', choices=['present', 'absent']),
- name=dict(type='str', required=True),
- queue_type=dict(type='str', default='standard', choices=['standard', 'fifo']),
- delay_seconds=dict(type='int', aliases=['delivery_delay']),
- maximum_message_size=dict(type='int'),
- message_retention_period=dict(type='int'),
- policy=dict(type='dict'),
- receive_message_wait_time_seconds=dict(type='int', aliases=['receive_message_wait_time']),
- redrive_policy=dict(type='dict'),
- visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']),
- kms_master_key_id=dict(type='str'),
- fifo_throughput_limit=dict(type='str', choices=["perQueue", "perMessageGroupId"]),
- deduplication_scope=dict(type='str', choices=['queue', 'messageGroup']),
- kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period'], no_log=False),
- content_based_deduplication=dict(type='bool'),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ name=dict(type="str", required=True),
+ queue_type=dict(type="str", default="standard", choices=["standard", "fifo"]),
+ delay_seconds=dict(type="int", aliases=["delivery_delay"]),
+ maximum_message_size=dict(type="int"),
+ message_retention_period=dict(type="int"),
+ policy=dict(type="dict"),
+ receive_message_wait_time_seconds=dict(type="int", aliases=["receive_message_wait_time"]),
+ redrive_policy=dict(type="dict"),
+ visibility_timeout=dict(type="int", aliases=["default_visibility_timeout"]),
+ kms_master_key_id=dict(type="str"),
+ fifo_throughput_limit=dict(type="str", choices=["perQueue", "perMessageGroupId"]),
+ deduplication_scope=dict(type="str", choices=["queue", "messageGroup"]),
+ kms_data_key_reuse_period_seconds=dict(type="int", aliases=["kms_data_key_reuse_period"], no_log=False),
+ content_based_deduplication=dict(type="bool"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- state = module.params.get('state')
- retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue'])
+ state = module.params.get("state")
+ retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=["AWS.SimpleQueueService.NonExistentQueue"])
try:
- client = module.client('sqs', retry_decorator=retry_decorator)
- if state == 'present':
+ client = module.client("sqs", retry_decorator=retry_decorator)
+ if state == "present":
result = create_or_update_sqs_queue(client, module)
- elif state == 'absent':
+ elif state == "absent":
result = delete_sqs_queue(client, module)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to control sqs queue')
+ module.fail_json_aws(e, msg="Failed to control sqs queue")
else:
module.exit_json(**result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
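
get_queue_url() above uses is_boto3_error_code() directly in an except
clause. Roughly, the helper inspects the exception currently propagating and
returns either ClientError, so the clause matches, or a freshly minted class
that nothing ever raises, so it does not. The sketch below captures the idea
and is not the amazon.aws implementation:

    import sys

    import botocore.exceptions

    def error_code_is(code):
        # Evaluated while the exception is in flight, so sys.exc_info()
        # still points at it.
        exc = sys.exc_info()[1]
        if (
            isinstance(exc, botocore.exceptions.ClientError)
            and exc.response.get("Error", {}).get("Code") == code
        ):
            return botocore.exceptions.ClientError
        # A class nothing raises makes the except clause a no-op.
        return type("NeverRaised", (Exception,), {})
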
diff --git a/ansible_collections/community/aws/plugins/modules/ssm_inventory_info.py b/ansible_collections/community/aws/plugins/modules/ssm_inventory_info.py
new file mode 100644
index 000000000..c5b849097
--- /dev/null
+++ b/ansible_collections/community/aws/plugins/modules/ssm_inventory_info.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = """
+module: ssm_inventory_info
+version_added: 6.0.0
+short_description: Get SSM inventory information for an EC2 instance
+
+description:
+ - Gather SSM inventory for an EC2 instance configured with SSM.
+
+author: 'Aubin Bikouo (@abikouo)'
+
+options:
+ instance_id:
+ description:
+ - EC2 instance id.
+ required: true
+ type: str
+
+extends_documentation_fragment:
+- amazon.aws.common.modules
+- amazon.aws.region.modules
+- amazon.aws.boto3
+"""
+
+EXAMPLES = """
+- name: Retrieve SSM inventory info for instance id 'i-012345678902'
+ community.aws.ssm_inventory_info:
+ instance_id: 'i-012345678902'
+"""
+
+
+RETURN = """
+ssm_inventory:
+ returned: on success
+ description: >
+ SSM inventory information.
+ type: dict
+ sample: {
+ 'agent_type': 'amazon-ssm-agent',
+ 'agent_version': '3.2.582.0',
+ 'computer_name': 'ip-172-31-44-166.ec2.internal',
+ 'instance_id': 'i-039eb9b1f55934ab6',
+ 'instance_status': 'Active',
+ 'ip_address': '172.31.44.166',
+ 'platform_name': 'Fedora Linux',
+ 'platform_type': 'Linux',
+ 'platform_version': '37',
+ 'resource_type': 'EC2Instance'
+ }
+"""
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
+
+class SsmInventoryInfoFailure(Exception):
+ def __init__(self, exc, msg):
+ self.exc = exc
+ self.msg = msg
+ super().__init__(msg)
+
+
+def get_ssm_inventory(connection, filters):
+ try:
+ return connection.get_inventory(Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ raise SsmInventoryInfoFailure(exc=e, msg="get_ssm_inventory() failed.")
+
+
+def execute_module(module, connection):
+ instance_id = module.params.get("instance_id")
+ try:
+ filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": [instance_id]}]
+
+ response = get_ssm_inventory(connection, filters)
+ entities = response.get("Entities", [])
+ ssm_inventory = {}
+ if entities:
+ content = entities[0].get("Data", {}).get("AWS:InstanceInformation", {}).get("Content", [])
+ if content:
+ ssm_inventory = camel_dict_to_snake_dict(content[0])
+ module.exit_json(changed=False, ssm_inventory=ssm_inventory)
+ except SsmInventoryInfoFailure as e:
+ module.fail_json_aws(exception=e.exc, msg=e.msg)
+
+
+def main():
+ argument_spec = dict(
+ instance_id=dict(required=True, type="str"),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ connection = module.client("ssm")
+ execute_module(module, connection)
+
+
+if __name__ == "__main__":
+ main()
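
Outside Ansible, the lookup that execute_module() performs maps one-to-one
onto boto3; the instance id below is a placeholder and default credentials
are assumed:

    import boto3

    from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

    ssm = boto3.client("ssm")
    response = ssm.get_inventory(
        Filters=[{"Key": "AWS:InstanceInformation.InstanceId", "Values": ["i-0123456789abcdef0"]}]
    )
    entities = response.get("Entities", [])
    if entities:
        content = entities[0].get("Data", {}).get("AWS:InstanceInformation", {}).get("Content", [])
        if content:
            # Same CamelCase to snake_case conversion the module applies.
            print(camel_dict_to_snake_dict(content[0]))
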
diff --git a/ansible_collections/community/aws/plugins/modules/ssm_parameter.py b/ansible_collections/community/aws/plugins/modules/ssm_parameter.py
index c435305c2..aefafca00 100644
--- a/ansible_collections/community/aws/plugins/modules/ssm_parameter.py
+++ b/ansible_collections/community/aws/plugins/modules/ssm_parameter.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: ssm_parameter
version_added: 1.0.0
@@ -86,18 +84,17 @@ author:
- "Bill Wang (@ozbillwang) <ozbillwang@gmail.com>"
- "Michael De La Rue (@mikedlr)"
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
- - amazon.aws.tags
-
notes:
- Support for I(tags) and I(purge_tags) was added in release 5.3.0.
-'''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create or update key/value pair in AWS SSM parameter store
community.aws.ssm_parameter:
name: "Hello"
@@ -165,9 +162,9 @@ EXAMPLES = '''
community.aws.ssm_parameter:
name: "Hello"
tags: {}
-'''
+"""
-RETURN = '''
+RETURN = r"""
parameter_metadata:
type: dict
description:
@@ -242,30 +239,32 @@ parameter_metadata:
returned: when the parameter has tags
example: {'MyTagName': 'Some Value'}
version_added: 5.3.0
-'''
+"""
import time
try:
import botocore
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+from ansible_collections.community.aws.plugins.module_utils.base import BaseWaiterFactory
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
class ParameterWaiterFactory(BaseWaiterFactory):
def __init__(self, module):
- client = module.client('ssm')
+ client = module.client("ssm")
super(ParameterWaiterFactory, self).__init__(module, client)
@property
@@ -273,22 +272,24 @@ class ParameterWaiterFactory(BaseWaiterFactory):
data = super(ParameterWaiterFactory, self)._waiter_model_data
ssm_data = dict(
parameter_exists=dict(
- operation='DescribeParameters',
- delay=1, maxAttempts=20,
+ operation="DescribeParameters",
+ delay=1,
+ maxAttempts=20,
acceptors=[
- dict(state='retry', matcher='error', expected='ParameterNotFound'),
- dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) == `0`'),
- dict(state='success', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'),
- ]
+ dict(state="retry", matcher="error", expected="ParameterNotFound"),
+ dict(state="retry", matcher="path", expected=True, argument="length(Parameters[].Name) == `0`"),
+ dict(state="success", matcher="path", expected=True, argument="length(Parameters[].Name) > `0`"),
+ ],
),
parameter_deleted=dict(
- operation='DescribeParameters',
- delay=1, maxAttempts=20,
+ operation="DescribeParameters",
+ delay=1,
+ maxAttempts=20,
acceptors=[
- dict(state='retry', matcher='path', expected=True, argument='length(Parameters[].Name) > `0`'),
- dict(state='success', matcher='path', expected=True, argument='length(Parameters[]) == `0`'),
- dict(state='success', matcher='error', expected='ParameterNotFound'),
- ]
+ dict(state="retry", matcher="path", expected=True, argument="length(Parameters[].Name) > `0`"),
+ dict(state="success", matcher="path", expected=True, argument="length(Parameters[]) == `0`"),
+ dict(state="success", matcher="error", expected="ParameterNotFound"),
+ ],
),
)
data.update(ssm_data)
@@ -299,10 +300,10 @@ def _wait_exists(client, module, name):
if module.check_mode:
return
wf = ParameterWaiterFactory(module)
- waiter = wf.get_waiter('parameter_exists')
+ waiter = wf.get_waiter("parameter_exists")
try:
waiter.wait(
- ParameterFilters=[{'Key': 'Name', "Values": [name]}],
+ ParameterFilters=[{"Key": "Name", "Values": [name]}],
)
except botocore.exceptions.WaiterError:
module.warn("Timeout waiting for parameter to exist")
@@ -317,7 +318,7 @@ def _wait_updated(client, module, name, version):
for x in range(1, 10):
try:
parameter = describe_parameter(client, module, ParameterFilters=[{"Key": "Name", "Values": [name]}])
- if parameter.get('Version', 0) > version:
+ if parameter.get("Version", 0) > version:
return
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe parameter while waiting for update")
@@ -328,10 +329,10 @@ def _wait_deleted(client, module, name):
if module.check_mode:
return
wf = ParameterWaiterFactory(module)
- waiter = wf.get_waiter('parameter_deleted')
+ waiter = wf.get_waiter("parameter_deleted")
try:
waiter.wait(
- ParameterFilters=[{'Key': 'Name', "Values": [name]}],
+ ParameterFilters=[{"Key": "Name", "Values": [name]}],
)
except botocore.exceptions.WaiterError:
module.warn("Timeout waiting for parameter to exist")
@@ -341,24 +342,27 @@ def _wait_deleted(client, module, name):
def tag_parameter(client, module, parameter_name, tags):
try:
- return client.add_tags_to_resource(aws_retry=True, ResourceType='Parameter',
- ResourceId=parameter_name, Tags=tags)
+ return client.add_tags_to_resource(
+ aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, Tags=tags
+ )
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to add tag(s) to parameter")
def untag_parameter(client, module, parameter_name, tag_keys):
try:
- return client.remove_tags_from_resource(aws_retry=True, ResourceType='Parameter',
- ResourceId=parameter_name, TagKeys=tag_keys)
+ return client.remove_tags_from_resource(
+ aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name, TagKeys=tag_keys
+ )
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to remove tag(s) from parameter")
def get_parameter_tags(client, module, parameter_name):
try:
- tags = client.list_tags_for_resource(aws_retry=True, ResourceType='Parameter',
- ResourceId=parameter_name)['TagList']
+ tags = client.list_tags_for_resource(aws_retry=True, ResourceType="Parameter", ResourceId=parameter_name)[
+ "TagList"
+ ]
tags_dict = boto3_tag_list_to_ansible_dict(tags)
return tags_dict
except (BotoCoreError, ClientError) as e:
@@ -373,14 +377,12 @@ def update_parameter_tags(client, module, parameter_name, supplied_tags):
return False, response
current_tags = get_parameter_tags(client, module, parameter_name)
- tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags,
- module.params.get('purge_tags'))
+ tags_to_add, tags_to_remove = compare_aws_tags(current_tags, supplied_tags, module.params.get("purge_tags"))
if tags_to_add:
if module.check_mode:
return True, response
- response = tag_parameter(client, module, parameter_name,
- ansible_dict_to_boto3_tag_list(tags_to_add))
+ response = tag_parameter(client, module, parameter_name, ansible_dict_to_boto3_tag_list(tags_to_add))
changed = True
if tags_to_remove:
if module.check_mode:
@@ -408,16 +410,16 @@ def update_parameter(client, module, **args):
@AWSRetry.jittered_backoff()
def describe_parameter(client, module, **args):
- paginator = client.get_paginator('describe_parameters')
+ paginator = client.get_paginator("describe_parameters")
existing_parameter = paginator.paginate(**args).build_full_result()
- if not existing_parameter['Parameters']:
+ if not existing_parameter["Parameters"]:
return None
- tags_dict = get_parameter_tags(client, module, module.params.get('name'))
- existing_parameter['Parameters'][0]['tags'] = tags_dict
+ tags_dict = get_parameter_tags(client, module, module.params.get("name"))
+ existing_parameter["Parameters"][0]["tags"] = tags_dict
- return existing_parameter['Parameters'][0]
+ return existing_parameter["Parameters"][0]
def create_update_parameter(client, module):
@@ -425,82 +427,78 @@ def create_update_parameter(client, module):
existing_parameter = None
response = {}
- args = dict(
- Name=module.params.get('name'),
- Type=module.params.get('string_type'),
- Tier=module.params.get('tier')
- )
+ args = dict(Name=module.params.get("name"), Type=module.params.get("string_type"), Tier=module.params.get("tier"))
- if (module.params.get('overwrite_value') in ("always", "changed")):
+ if module.params.get("overwrite_value") in ("always", "changed"):
args.update(Overwrite=True)
else:
args.update(Overwrite=False)
- if module.params.get('value') is not None:
- args.update(Value=module.params.get('value'))
+ if module.params.get("value") is not None:
+ args.update(Value=module.params.get("value"))
- if module.params.get('description'):
- args.update(Description=module.params.get('description'))
+ if module.params.get("description"):
+ args.update(Description=module.params.get("description"))
- if module.params.get('string_type') == 'SecureString':
- args.update(KeyId=module.params.get('key_id'))
+ if module.params.get("string_type") == "SecureString":
+ args.update(KeyId=module.params.get("key_id"))
try:
- existing_parameter = client.get_parameter(aws_retry=True, Name=args['Name'], WithDecryption=True)
+ existing_parameter = client.get_parameter(aws_retry=True, Name=args["Name"], WithDecryption=True)
except botocore.exceptions.ClientError:
pass
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="fetching parameter")
if existing_parameter:
- original_version = existing_parameter['Parameter']['Version']
- if 'Value' not in args:
- args['Value'] = existing_parameter['Parameter']['Value']
+ original_version = existing_parameter["Parameter"]["Version"]
+ if "Value" not in args:
+ args["Value"] = existing_parameter["Parameter"]["Value"]
- if (module.params.get('overwrite_value') == 'always'):
+ if module.params.get("overwrite_value") == "always":
(changed, response) = update_parameter(client, module, **args)
- elif (module.params.get('overwrite_value') == 'changed'):
- if existing_parameter['Parameter']['Type'] != args['Type']:
+ elif module.params.get("overwrite_value") == "changed":
+ if existing_parameter["Parameter"]["Type"] != args["Type"]:
(changed, response) = update_parameter(client, module, **args)
- elif existing_parameter['Parameter']['Value'] != args['Value']:
+ elif existing_parameter["Parameter"]["Value"] != args["Value"]:
(changed, response) = update_parameter(client, module, **args)
- elif args.get('Description'):
+ elif args.get("Description"):
# Description field not available from get_parameter function so get it from describe_parameters
try:
describe_existing_parameter = describe_parameter(
- client, module,
- ParameterFilters=[{"Key": "Name", "Values": [args['Name']]}])
+ client, module, ParameterFilters=[{"Key": "Name", "Values": [args["Name"]]}]
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="getting description value")
- if describe_existing_parameter.get('Description') != args['Description']:
+ if describe_existing_parameter.get("Description") != args["Description"]:
(changed, response) = update_parameter(client, module, **args)
if changed:
- _wait_updated(client, module, module.params.get('name'), original_version)
+ _wait_updated(client, module, module.params.get("name"), original_version)
# Handle tag updates for existing parameters
- if module.params.get('overwrite_value') != 'never':
+ if module.params.get("overwrite_value") != "never":
tags_changed, tags_response = update_parameter_tags(
- client, module, existing_parameter['Parameter']['Name'],
- module.params.get('tags'))
+ client, module, existing_parameter["Parameter"]["Name"], module.params.get("tags")
+ )
changed = changed or tags_changed
if tags_response:
- response['tag_updates'] = tags_response
+ response["tag_updates"] = tags_response
else:
# Add tags in initial creation request
- if module.params.get('tags'):
- args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get('tags')))
+ if module.params.get("tags"):
+ args.update(Tags=ansible_dict_to_boto3_tag_list(module.params.get("tags")))
# Overwrite=True conflicts with tags and is not needed for new param
args.update(Overwrite=False)
(changed, response) = update_parameter(client, module, **args)
- _wait_exists(client, module, module.params.get('name'))
+ _wait_exists(client, module, module.params.get("name"))
return changed, response
@@ -509,8 +507,8 @@ def delete_parameter(client, module):
response = {}
try:
- existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get('name'), WithDecryption=True)
- except is_boto3_error_code('ParameterNotFound'):
+ existing_parameter = client.get_parameter(aws_retry=True, Name=module.params.get("name"), WithDecryption=True)
+ except is_boto3_error_code("ParameterNotFound"):
return False, {}
except botocore.exceptions.ClientError:
# If we can't describe the parameter we may still be able to delete it
@@ -524,23 +522,23 @@ def delete_parameter(client, module):
return True, {}
try:
- response = client.delete_parameter(
- aws_retry=True,
- Name=module.params.get('name')
- )
- except is_boto3_error_code('ParameterNotFound'):
+ response = client.delete_parameter(aws_retry=True, Name=module.params.get("name"))
+ except is_boto3_error_code("ParameterNotFound"):
return False, {}
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="deleting parameter")
- _wait_deleted(client, module, module.params.get('name'))
+ _wait_deleted(client, module, module.params.get("name"))
return True, response
def setup_client(module):
retry_decorator = AWSRetry.jittered_backoff()
- connection = module.client('ssm', retry_decorator=retry_decorator)
+ connection = module.client("ssm", retry_decorator=retry_decorator)
return connection
@@ -549,14 +547,14 @@ def setup_module_object():
name=dict(required=True),
description=dict(),
value=dict(required=False, no_log=True),
- state=dict(default='present', choices=['present', 'absent']),
- string_type=dict(default='String', choices=['String', 'StringList', 'SecureString'], aliases=['type']),
- decryption=dict(default=True, type='bool'),
+ state=dict(default="present", choices=["present", "absent"]),
+ string_type=dict(default="String", choices=["String", "StringList", "SecureString"], aliases=["type"]),
+ decryption=dict(default=True, type="bool"),
key_id=dict(default="alias/aws/ssm"),
- overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']),
- tier=dict(default='Standard', choices=['Standard', 'Advanced', 'Intelligent-Tiering']),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
+ overwrite_value=dict(default="changed", choices=["never", "changed", "always"]),
+ tier=dict(default="Standard", choices=["Standard", "Advanced", "Intelligent-Tiering"]),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
)
return AnsibleAWSModule(
@@ -567,7 +565,7 @@ def setup_module_object():
def main():
module = setup_module_object()
- state = module.params.get('state')
+ state = module.params.get("state")
client = setup_client(module)
invocations = {
@@ -580,18 +578,17 @@ def main():
try:
parameter_metadata = describe_parameter(
- client, module,
- ParameterFilters=[{"Key": "Name", "Values": [module.params.get('name')]}])
- except is_boto3_error_code('ParameterNotFound'):
+ client, module, ParameterFilters=[{"Key": "Name", "Values": [module.params.get("name")]}]
+ )
+ except is_boto3_error_code("ParameterNotFound"):
return False, {}
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="to describe parameter")
if parameter_metadata:
- result['parameter_metadata'] = camel_dict_to_snake_dict(parameter_metadata,
- ignore_list=['tags'])
+ result["parameter_metadata"] = camel_dict_to_snake_dict(parameter_metadata, ignore_list=["tags"])
module.exit_json(changed=changed, **result)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
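
The waiter definitions carried by ParameterWaiterFactory above are plain
botocore waiter model data. A standalone sketch wiring an equivalent model
straight to a client, with the parameter name and credentials as placeholders:

    import boto3
    from botocore.waiter import WaiterModel, create_waiter_with_client

    model = WaiterModel({
        "version": 2,
        "waiters": {
            "ParameterExists": {
                "operation": "DescribeParameters",
                "delay": 1,
                "maxAttempts": 20,
                "acceptors": [
                    {"state": "retry", "matcher": "error", "expected": "ParameterNotFound"},
                    {"state": "retry", "matcher": "path", "expected": True,
                     "argument": "length(Parameters[].Name) == `0`"},
                    {"state": "success", "matcher": "path", "expected": True,
                     "argument": "length(Parameters[].Name) > `0`"},
                ],
            },
        },
    })
    ssm = boto3.client("ssm")
    create_waiter_with_client("ParameterExists", model, ssm).wait(
        ParameterFilters=[{"Key": "Name", "Values": ["my-parameter"]}]
    )
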
diff --git a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py
index c141610bb..a2558c808 100644
--- a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py
+++ b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2019, Tom De Keyser (@tdekeyser)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: stepfunctions_state_machine
version_added: 1.0.0
@@ -44,16 +41,17 @@ options:
choices: [ present, absent ]
type: str
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
- - amazon.aws.tags
author:
- Tom De Keyser (@tdekeyser)
-'''
-EXAMPLES = '''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
# Create a new AWS Step Functions state machine
- name: Setup HelloWorld state machine
community.aws.stepfunctions_state_machine:
@@ -77,61 +75,62 @@ EXAMPLES = '''
community.aws.stepfunctions_state_machine:
name: HelloWorldStateMachine
state: absent
-'''
+"""
-RETURN = '''
+RETURN = r"""
state_machine_arn:
description: ARN of the AWS Step Functions state machine
type: str
returned: always
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_tag_list,
- AWSRetry,
- compare_aws_tags,
- boto3_tag_list_to_ansible_dict,
- )
+"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
def manage_state_machine(state, sfn_client, module):
state_machine_arn = get_state_machine_arn(sfn_client, module)
- if state == 'present':
+ if state == "present":
if state_machine_arn is None:
create(sfn_client, module)
else:
update(state_machine_arn, sfn_client, module)
- elif state == 'absent':
+ elif state == "absent":
if state_machine_arn is not None:
remove(state_machine_arn, sfn_client, module)
- check_mode(module, msg='State is up-to-date.')
+ check_mode(module, msg="State is up-to-date.")
module.exit_json(changed=False, state_machine_arn=state_machine_arn)
def create(sfn_client, module):
- check_mode(module, msg='State machine would be created.', changed=True)
+ check_mode(module, msg="State machine would be created.", changed=True)
- tags = module.params.get('tags')
- sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else []
+ tags = module.params.get("tags")
+ sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name="key", tag_value_key_name="value") if tags else []
state_machine = sfn_client.create_state_machine(
- name=module.params.get('name'),
- definition=module.params.get('definition'),
- roleArn=module.params.get('role_arn'),
- tags=sfn_tags
+ name=module.params.get("name"),
+ definition=module.params.get("definition"),
+ roleArn=module.params.get("role_arn"),
+ tags=sfn_tags,
)
- module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn'))
+ module.exit_json(changed=True, state_machine_arn=state_machine.get("stateMachineArn"))
def remove(state_machine_arn, sfn_client, module):
- check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True)
+ check_mode(module, msg=f"State machine would be deleted: {state_machine_arn}", changed=True)
sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
module.exit_json(changed=True, state_machine_arn=state_machine_arn)
@@ -141,29 +140,28 @@ def update(state_machine_arn, sfn_client, module):
tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module)
if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove:
- check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True)
+ check_mode(module, msg=f"State machine would be updated: {state_machine_arn}", changed=True)
sfn_client.update_state_machine(
stateMachineArn=state_machine_arn,
- definition=module.params.get('definition'),
- roleArn=module.params.get('role_arn')
- )
- sfn_client.untag_resource(
- resourceArn=state_machine_arn,
- tagKeys=tags_to_remove
+ definition=module.params.get("definition"),
+ roleArn=module.params.get("role_arn"),
)
+ sfn_client.untag_resource(resourceArn=state_machine_arn, tagKeys=tags_to_remove)
sfn_client.tag_resource(
resourceArn=state_machine_arn,
- tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value')
+ tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name="key", tag_value_key_name="value"),
)
module.exit_json(changed=True, state_machine_arn=state_machine_arn)
def compare_tags(state_machine_arn, sfn_client, module):
- new_tags = module.params.get('tags')
- current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags')
- return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags'))
+ new_tags = module.params.get("tags")
+ current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get("tags")
+ return compare_aws_tags(
+ boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get("purge_tags")
+ )
def params_changed(state_machine_arn, sfn_client, module):
@@ -172,7 +170,9 @@ def params_changed(state_machine_arn, sfn_client, module):
from the existing state machine parameters.
"""
current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn)
- return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn')
+ return current.get("definition") != module.params.get("definition") or current.get("roleArn") != module.params.get(
+ "role_arn"
+ )
def get_state_machine_arn(sfn_client, module):
@@ -180,42 +180,42 @@ def get_state_machine_arn(sfn_client, module):
Finds the state machine ARN based on the name parameter. Returns None if
there is no state machine with this name.
"""
- target_name = module.params.get('name')
- all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines')
+ target_name = module.params.get("name")
+ all_state_machines = sfn_client.list_state_machines(aws_retry=True).get("stateMachines")
for state_machine in all_state_machines:
- if state_machine.get('name') == target_name:
- return state_machine.get('stateMachineArn')
+ if state_machine.get("name") == target_name:
+ return state_machine.get("stateMachineArn")
-def check_mode(module, msg='', changed=False):
+def check_mode(module, msg="", changed=False):
if module.check_mode:
module.exit_json(changed=changed, output=msg)
def main():
module_args = dict(
- name=dict(type='str', required=True),
- definition=dict(type='json'),
- role_arn=dict(type='str'),
- state=dict(choices=['present', 'absent'], default='present'),
- tags=dict(default=None, type='dict', aliases=['resource_tags']),
- purge_tags=dict(default=True, type='bool'),
+ name=dict(type="str", required=True),
+ definition=dict(type="json"),
+ role_arn=dict(type="str"),
+ state=dict(choices=["present", "absent"], default="present"),
+ tags=dict(default=None, type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(default=True, type="bool"),
)
module = AnsibleAWSModule(
argument_spec=module_args,
- required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])],
- supports_check_mode=True
+ required_if=[("state", "present", ["role_arn"]), ("state", "present", ["definition"])],
+ supports_check_mode=True,
)
- sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5))
- state = module.params.get('state')
+ sfn_client = module.client("stepfunctions", retry_decorator=AWSRetry.jittered_backoff(retries=5))
+ state = module.params.get("state")
try:
manage_state_machine(state, sfn_client, module)
except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to manage state machine')
+ module.fail_json_aws(e, msg="Failed to manage state machine")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
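
compare_tags() above leans on the compare_aws_tags() contract: given the
current tags, the desired tags, and a purge flag, it returns the tags to set
and the tag keys to delete. A quick illustration with made-up values:

    from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

    current = {"env": "dev", "owner": "alice"}
    desired = {"env": "prod"}

    to_set, to_delete = compare_aws_tags(current, desired, purge_tags=True)
    # to_set == {"env": "prod"}; to_delete == ["owner"]
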
diff --git a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py
index aacfa987f..b7a9f7efb 100644
--- a/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py
+++ b/ansible_collections/community/aws/plugins/modules/stepfunctions_state_machine_execution.py
@@ -1,13 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2019, Prasad Katti (@prasadkatti)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: stepfunctions_state_machine_execution
version_added: 1.0.0
@@ -47,16 +44,16 @@ options:
type: str
default: ''
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
-
author:
- Prasad Katti (@prasadkatti)
-'''
-EXAMPLES = '''
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
- name: Start an execution of a state machine
community.aws.stepfunctions_state_machine_execution:
name: an_execution_name
@@ -69,9 +66,9 @@ EXAMPLES = '''
execution_arn: "arn:aws:states:us-west-2:123456789012:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
cause: "cause of task failure"
error: "error code of the failure"
-'''
+"""
-RETURN = '''
+RETURN = r"""
execution_arn:
description: ARN of the AWS Step Functions state machine execution.
type: str
@@ -87,7 +84,7 @@ stop_date:
type: str
returned: if action == stop
sample: "2019-11-02T22:39:49.071000-07:00"
-'''
+"""
try:
@@ -97,100 +94,96 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def start_execution(module, sfn_client):
- '''
+ """
start_execution uses execution name to determine if a previous execution already exists.
If an execution with the provided name already exists, client.start_execution will not be called.
- '''
+ """
- state_machine_arn = module.params.get('state_machine_arn')
- name = module.params.get('name')
- execution_input = module.params.get('execution_input')
+ state_machine_arn = module.params.get("state_machine_arn")
+ name = module.params.get("name")
+ execution_input = module.params.get("execution_input")
try:
# list_executions is eventually consistent
- page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)
+ page_iterators = sfn_client.get_paginator("list_executions").paginate(stateMachineArn=state_machine_arn)
- for execution in page_iterators.build_full_result()['executions']:
- if name == execution['name']:
- check_mode(module, msg='State machine execution already exists.', changed=False)
+ for execution in page_iterators.build_full_result()["executions"]:
+ if name == execution["name"]:
+ check_mode(module, msg="State machine execution already exists.", changed=False)
module.exit_json(changed=False)
- check_mode(module, msg='State machine execution would be started.', changed=True)
- res_execution = sfn_client.start_execution(
- stateMachineArn=state_machine_arn,
- name=name,
- input=execution_input
- )
- except is_boto3_error_code('ExecutionAlreadyExists'):
+ check_mode(module, msg="State machine execution would be started.", changed=True)
+ res_execution = sfn_client.start_execution(stateMachineArn=state_machine_arn, name=name, input=execution_input)
+ except is_boto3_error_code("ExecutionAlreadyExists"):
# this branch is no longer reached; the name check above already exits for existing executions
module.exit_json(changed=False)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to start execution.")
module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
def stop_execution(module, sfn_client):
-
- cause = module.params.get('cause')
- error = module.params.get('error')
- execution_arn = module.params.get('execution_arn')
+ cause = module.params.get("cause")
+ error = module.params.get("error")
+ execution_arn = module.params.get("execution_arn")
try:
# describe_execution is eventually consistent
- execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status']
- if execution_status != 'RUNNING':
- check_mode(module, msg='State machine execution is not running.', changed=False)
+ execution_status = sfn_client.describe_execution(executionArn=execution_arn)["status"]
+ if execution_status != "RUNNING":
+ check_mode(module, msg="State machine execution is not running.", changed=False)
module.exit_json(changed=False)
- check_mode(module, msg='State machine execution would be stopped.', changed=True)
- res = sfn_client.stop_execution(
- executionArn=execution_arn,
- cause=cause,
- error=error
- )
+ check_mode(module, msg="State machine execution would be stopped.", changed=True)
+ res = sfn_client.stop_execution(executionArn=execution_arn, cause=cause, error=error)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to stop execution.")
module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
-def check_mode(module, msg='', changed=False):
+def check_mode(module, msg="", changed=False):
if module.check_mode:
module.exit_json(changed=changed, output=msg)
def main():
module_args = dict(
- action=dict(choices=['start', 'stop'], default='start'),
- name=dict(type='str'),
- execution_input=dict(type='json', default={}),
- state_machine_arn=dict(type='str'),
- cause=dict(type='str', default=''),
- error=dict(type='str', default=''),
- execution_arn=dict(type='str')
+ action=dict(choices=["start", "stop"], default="start"),
+ name=dict(type="str"),
+ execution_input=dict(type="json", default={}),
+ state_machine_arn=dict(type="str"),
+ cause=dict(type="str", default=""),
+ error=dict(type="str", default=""),
+ execution_arn=dict(type="str"),
)
module = AnsibleAWSModule(
argument_spec=module_args,
- required_if=[('action', 'start', ['name', 'state_machine_arn']),
- ('action', 'stop', ['execution_arn']),
- ],
- supports_check_mode=True
+ required_if=[
+ ("action", "start", ["name", "state_machine_arn"]),
+ ("action", "stop", ["execution_arn"]),
+ ],
+ supports_check_mode=True,
)
- sfn_client = module.client('stepfunctions')
+ sfn_client = module.client("stepfunctions")
- action = module.params.get('action')
+ action = module.params.get("action")
if action == "start":
start_execution(module, sfn_client)
else:
stop_execution(module, sfn_client)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
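
# Editor's note: the paginate-then-compare idempotency check used by start_execution
# above can be reproduced standalone. A minimal boto3 sketch (the ARN and execution
# name are placeholders); list_executions is eventually consistent, so a very recent
# execution may not yet appear in the listing:

    import boto3

    sfn = boto3.client("stepfunctions")
    arn = "arn:aws:states:us-west-2:123456789012:stateMachine:Demo"

    pages = sfn.get_paginator("list_executions").paginate(stateMachineArn=arn)
    exists = any(e["name"] == "an_execution_name" for page in pages for e in page["executions"])
    if not exists:
        # Mirrors the module's call; input must be a JSON string.
        sfn.start_execution(stateMachineArn=arn, name="an_execution_name", input="{}")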
diff --git a/ansible_collections/community/aws/plugins/modules/storagegateway_info.py b/ansible_collections/community/aws/plugins/modules/storagegateway_info.py
index 3f3c3ae2f..55b7c4685 100644
--- a/ansible_collections/community/aws/plugins/modules/storagegateway_info.py
+++ b/ansible_collections/community/aws/plugins/modules/storagegateway_info.py
@@ -1,14 +1,12 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Loic BLOT (@nerzhul) <loic.blot@unix-experience.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This module is sponsored by E.T.A.I. (www.etai.fr)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: storagegateway_info
version_added: 1.0.0
@@ -45,12 +43,12 @@ options:
required: false
default: true
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-RETURN = '''
+RETURN = r"""
gateways:
description: list of gateway objects
returned: always
@@ -161,47 +159,49 @@ gateways:
returned: always
type: str
sample: "present"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: "Get AWS storage gateway information"
- community.aws.aws_sgw_info:
+ community.aws.storagegateway_info:
- name: "Get AWS storage gateway information for region eu-west-3"
- community.aws.aws_sgw_info:
+ community.aws.storagegateway_info:
region: eu-west-3
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+"""
try:
- from botocore.exceptions import BotoCoreError, ClientError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
class SGWInformationManager(object):
def __init__(self, client, module):
self.client = client
self.module = module
- self.name = self.module.params.get('name')
+ self.name = self.module.params.get("name")
def fetch(self):
gateways = self.list_gateways()
for gateway in gateways:
- if self.module.params.get('gather_local_disks'):
+ if self.module.params.get("gather_local_disks"):
self.list_local_disks(gateway)
# File share gateway
- if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'):
+ if gateway["gateway_type"] == "FILE_S3" and self.module.params.get("gather_file_shares"):
self.list_gateway_file_shares(gateway)
# Volume tape gateway
- elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'):
+ elif gateway["gateway_type"] == "VTL" and self.module.params.get("gather_tapes"):
self.list_gateway_vtl(gateway)
# iSCSI gateway
- elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'):
+ elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get("gather_volumes"):
self.list_gateway_volumes(gateway)
self.module.exit_json(gateways=gateways)
@@ -209,12 +209,13 @@ class SGWInformationManager(object):
"""
List all storage gateways for the AWS endpoint.
"""
+
def list_gateways(self):
try:
- paginator = self.client.get_paginator('list_gateways')
+ paginator = self.client.get_paginator("list_gateways")
response = paginator.paginate(
PaginationConfig={
- 'PageSize': 100,
+ "PageSize": 100,
}
).build_full_result()
@@ -231,6 +232,7 @@ class SGWInformationManager(object):
Read file share objects from AWS API response.
Drop the gateway_arn attribute from the response, as it would duplicate the parent object's value.
"""
+
@staticmethod
def _read_gateway_fileshare_response(fileshares, aws_response):
for share in aws_response["FileShareInfoList"]:
@@ -244,22 +246,16 @@ class SGWInformationManager(object):
"""
List file shares attached to AWS storage gateway when in S3 mode.
"""
+
def list_gateway_file_shares(self, gateway):
try:
- response = self.client.list_file_shares(
- GatewayARN=gateway["gateway_arn"],
- Limit=100
- )
+ response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Limit=100)
gateway["file_shares"] = []
marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
while marker is not None:
- response = self.client.list_file_shares(
- GatewayARN=gateway["gateway_arn"],
- Marker=marker,
- Limit=100
- )
+ response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Marker=marker, Limit=100)
marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
except (BotoCoreError, ClientError) as e:
@@ -268,10 +264,13 @@ class SGWInformationManager(object):
"""
List storage gateway local disks
"""
+
def list_local_disks(self, gateway):
try:
- gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in
- self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']]
+ gateway["local_disks"] = [
+ camel_dict_to_snake_dict(disk)
+ for disk in self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])["Disks"]
+ ]
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks")
@@ -279,6 +278,7 @@ class SGWInformationManager(object):
Read tape objects from AWS API response.
Drop the gateway_arn attribute from the response, as it would duplicate the parent object's value.
"""
+
@staticmethod
def _read_gateway_tape_response(tapes, aws_response):
for tape in aws_response["TapeInfos"]:
@@ -292,20 +292,16 @@ class SGWInformationManager(object):
"""
List VTL & VTS attached to AWS storage gateway in VTL mode
"""
+
def list_gateway_vtl(self, gateway):
try:
- response = self.client.list_tapes(
- Limit=100
- )
+ response = self.client.list_tapes(Limit=100)
gateway["tapes"] = []
marker = self._read_gateway_tape_response(gateway["tapes"], response)
while marker is not None:
- response = self.client.list_tapes(
- Marker=marker,
- Limit=100
- )
+ response = self.client.list_tapes(Marker=marker, Limit=100)
marker = self._read_gateway_tape_response(gateway["tapes"], response)
except (BotoCoreError, ClientError) as e:
@@ -314,14 +310,15 @@ class SGWInformationManager(object):
"""
List volumes attached to AWS storage gateway in CACHED or STORED mode
"""
+
def list_gateway_volumes(self, gateway):
try:
- paginator = self.client.get_paginator('list_volumes')
+ paginator = self.client.get_paginator("list_volumes")
response = paginator.paginate(
GatewayARN=gateway["gateway_arn"],
PaginationConfig={
- 'PageSize': 100,
- }
+ "PageSize": 100,
+ },
).build_full_result()
gateway["volumes"] = []
@@ -339,10 +336,10 @@ class SGWInformationManager(object):
def main():
argument_spec = dict(
- gather_local_disks=dict(type='bool', default=True),
- gather_tapes=dict(type='bool', default=True),
- gather_file_shares=dict(type='bool', default=True),
- gather_volumes=dict(type='bool', default=True)
+ gather_local_disks=dict(type="bool", default=True),
+ gather_tapes=dict(type="bool", default=True),
+ gather_file_shares=dict(type="bool", default=True),
+ gather_volumes=dict(type="bool", default=True),
)
module = AnsibleAWSModule(
@@ -350,13 +347,13 @@ def main():
supports_check_mode=True,
)
- client = module.client('storagegateway')
+ client = module.client("storagegateway")
if client is None: # this should never happen
- module.fail_json(msg='Unknown error, failed to create storagegateway client, no information available.')
+ module.fail_json(msg="Unknown error, failed to create storagegateway client, no information available.")
SGWInformationManager(client, module).fetch()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
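
# Editor's note: the Marker loops in list_gateway_file_shares and list_gateway_vtl
# above follow the Storage Gateway pagination contract: a Marker key is returned only
# while more pages remain. A minimal boto3 sketch of the same idiom using list_tapes:

    import boto3

    sgw = boto3.client("storagegateway")

    tapes = []
    response = sgw.list_tapes(Limit=100)
    tapes.extend(response["TapeInfos"])
    while "Marker" in response:  # absent once the last page has been returned
        response = sgw.list_tapes(Marker=response["Marker"], Limit=100)
        tapes.extend(response["TapeInfos"])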
diff --git a/ansible_collections/community/aws/plugins/modules/sts_assume_role.py b/ansible_collections/community/aws/plugins/modules/sts_assume_role.py
deleted file mode 100644
index 8e5a3b4fe..000000000
--- a/ansible_collections/community/aws/plugins/modules/sts_assume_role.py
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: sts_assume_role
-version_added: 1.0.0
-short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
-description:
- - Assume a role using AWS Security Token Service and obtain temporary credentials.
-author:
- - Boris Ekelchik (@bekelchik)
- - Marek Piatek (@piontas)
-options:
- role_arn:
- description:
- - The Amazon Resource Name (ARN) of the role that the caller is
- assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs).
- required: true
- type: str
- role_session_name:
- description:
- - Name of the role's session - will be used by CloudTrail.
- required: true
- type: str
- policy:
- description:
- - Supplemental policy to use in addition to assumed role's policies.
- type: str
- duration_seconds:
- description:
- - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours).
- - The max depends on the IAM role's sessions duration setting.
- - By default, the value is set to 3600 seconds.
- type: int
- external_id:
- description:
- - A unique identifier that is used by third parties to assume a role in their customers' accounts.
- type: str
- mfa_serial_number:
- description:
- - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
- type: str
- mfa_token:
- description:
- - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
- type: str
-notes:
- - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token.
-extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
-
-RETURN = '''
-sts_creds:
- description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token
- returned: always
- type: dict
- sample:
- access_key: XXXXXXXXXXXXXXXXXXXX
- expiration: '2017-11-11T11:11:11+00:00'
- secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-sts_user:
- description: The Amazon Resource Name (ARN) and the assumed role ID
- returned: always
- type: dict
- sample:
- assumed_role_id: arn:aws:sts::123456789012:assumed-role/demo/Bob
- arn: ARO123EXAMPLE123:Bob
-changed:
- description: True if obtaining the credentials succeeds
- type: bool
- returned: always
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
-- community.aws.sts_assume_role:
- role_arn: "arn:aws:iam::123456789012:role/someRole"
- role_session_name: "someRoleSession"
- register: assumed_role
-
-# Use the assumed role above to tag an instance in account 123456789012
-- amazon.aws.ec2_tag:
- aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
- aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
- security_token: "{{ assumed_role.sts_creds.session_token }}"
- resource: i-xyzxyz01
- state: present
- tags:
- MyNewTag: value
-
-'''
-
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, ParamValidationError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def _parse_response(response):
- credentials = response.get('Credentials', {})
- user = response.get('AssumedRoleUser', {})
-
- sts_cred = {
- 'access_key': credentials.get('AccessKeyId'),
- 'secret_key': credentials.get('SecretAccessKey'),
- 'session_token': credentials.get('SessionToken'),
- 'expiration': credentials.get('Expiration')
-
- }
- sts_user = camel_dict_to_snake_dict(user)
- return sts_cred, sts_user
-
-
-def assume_role_policy(connection, module):
- params = {
- 'RoleArn': module.params.get('role_arn'),
- 'RoleSessionName': module.params.get('role_session_name'),
- 'Policy': module.params.get('policy'),
- 'DurationSeconds': module.params.get('duration_seconds'),
- 'ExternalId': module.params.get('external_id'),
- 'SerialNumber': module.params.get('mfa_serial_number'),
- 'TokenCode': module.params.get('mfa_token')
- }
- changed = False
-
- kwargs = dict((k, v) for k, v in params.items() if v is not None)
-
- try:
- response = connection.assume_role(**kwargs)
- changed = True
- except (ClientError, ParamValidationError) as e:
- module.fail_json_aws(e)
-
- sts_cred, sts_user = _parse_response(response)
- module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user)
-
-
-def main():
- argument_spec = dict(
- role_arn=dict(required=True),
- role_session_name=dict(required=True),
- duration_seconds=dict(required=False, default=None, type='int'),
- external_id=dict(required=False, default=None),
- policy=dict(required=False, default=None),
- mfa_serial_number=dict(required=False, default=None),
- mfa_token=dict(required=False, default=None, no_log=True)
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
-
- connection = module.client('sts')
-
- assume_role_policy(connection, module)
-
-
-if __name__ == '__main__':
- main()
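
# Editor's note: although sts_assume_role is removed from the collection in this diff,
# the parameter-filtering idiom it used (assemble every possible argument, then drop
# the Nones before calling assume_role) is still worth recording. A minimal boto3
# sketch with placeholder role ARN and session name:

    import boto3

    sts = boto3.client("sts")
    params = {
        "RoleArn": "arn:aws:iam::123456789012:role/someRole",
        "RoleSessionName": "someRoleSession",
        "DurationSeconds": 3600,
        "ExternalId": None,  # optional arguments left unset stay None
    }
    # assume_role rejects None values, so only forward the parameters that are set.
    response = sts.assume_role(**{k: v for k, v in params.items() if v is not None})
    credentials = response["Credentials"]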
diff --git a/ansible_collections/community/aws/plugins/modules/sts_session_token.py b/ansible_collections/community/aws/plugins/modules/sts_session_token.py
index 03df560e9..cb9f99fd3 100644
--- a/ansible_collections/community/aws/plugins/modules/sts_session_token.py
+++ b/ansible_collections/community/aws/plugins/modules/sts_session_token.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: sts_session_token
version_added: 1.0.0
-short_description: Obtain a session token from the AWS Security Token Service
+short_description: obtain a session token from the AWS Security Token Service
description:
- - Obtain a session token from the AWS Security Token Service.
-author: Victor Costan (@pwnall)
+ - Obtain a session token from the AWS Security Token Service.
+author:
+ - Victor Costan (@pwnall)
options:
duration_seconds:
description:
@@ -30,20 +29,21 @@ options:
- The value provided by the MFA device, if the trust policy of the user requires MFA.
type: str
notes:
- - In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token).
+ - In order to use the session token in a following playbook task you must pass the I(access_key),
+ I(secret_key) and I(session_token) parameters to modules that should use the session credentials.
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-RETURN = """
+RETURN = r"""
sts_creds:
description: The Credentials object returned by the AWS Security Token Service
returned: always
type: list
sample:
- access_key: ASXXXXXXXXXXXXXXXXXX
+ access_key: ASIAXXXXXXXXXXXXXXXX
expiration: "2016-04-08T11:59:47+00:00"
secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -54,26 +54,27 @@ changed:
"""
-EXAMPLES = '''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
- name: Get a session token
community.aws.sts_session_token:
+ access_key: AKIA1EXAMPLE1EXAMPLE
+ secret_key: 123456789abcdefghijklmnopqrstuvwxyzABCDE
duration_seconds: 3600
register: session_credentials
- name: Use the session token obtained above to tag an instance in account 123456789012
amazon.aws.ec2_tag:
- aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
- aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
- security_token: "{{ session_credentials.sts_creds.session_token }}"
+ access_key: "{{ session_credentials.sts_creds.access_key }}"
+ secret_key: "{{ session_credentials.sts_creds.secret_key }}"
+ session_token: "{{ session_credentials.sts_creds.session_token }}"
resource: i-xyzxyz01
state: present
tags:
- MyNewTag: value
-
-'''
+ MyNewTag: value
+"""
try:
import botocore
@@ -81,35 +82,35 @@ try:
except ImportError:
pass # Handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def normalize_credentials(credentials):
- access_key = credentials.get('AccessKeyId', None)
- secret_key = credentials.get('SecretAccessKey', None)
- session_token = credentials.get('SessionToken', None)
- expiration = credentials.get('Expiration', None)
+ access_key = credentials.get("AccessKeyId", None)
+ secret_key = credentials.get("SecretAccessKey", None)
+ session_token = credentials.get("SessionToken", None)
+ expiration = credentials.get("Expiration", None)
return {
- 'access_key': access_key,
- 'secret_key': secret_key,
- 'session_token': session_token,
- 'expiration': expiration
+ "access_key": access_key,
+ "secret_key": secret_key,
+ "session_token": session_token,
+ "expiration": expiration,
}
def get_session_token(connection, module):
- duration_seconds = module.params.get('duration_seconds')
- mfa_serial_number = module.params.get('mfa_serial_number')
- mfa_token = module.params.get('mfa_token')
+ duration_seconds = module.params.get("duration_seconds")
+ mfa_serial_number = module.params.get("mfa_serial_number")
+ mfa_token = module.params.get("mfa_token")
changed = False
args = {}
if duration_seconds is not None:
- args['DurationSeconds'] = duration_seconds
+ args["DurationSeconds"] = duration_seconds
if mfa_serial_number is not None:
- args['SerialNumber'] = mfa_serial_number
+ args["SerialNumber"] = mfa_serial_number
if mfa_token is not None:
- args['TokenCode'] = mfa_token
+ args["TokenCode"] = mfa_token
try:
response = connection.get_session_token(**args)
@@ -117,13 +118,13 @@ def get_session_token(connection, module):
except ClientError as e:
module.fail_json(msg=e)
- credentials = normalize_credentials(response.get('Credentials', {}))
+ credentials = normalize_credentials(response.get("Credentials", {}))
module.exit_json(changed=changed, sts_creds=credentials)
def main():
argument_spec = dict(
- duration_seconds=dict(required=False, default=None, type='int'),
+ duration_seconds=dict(required=False, default=None, type="int"),
mfa_serial_number=dict(required=False, default=None),
mfa_token=dict(required=False, default=None, no_log=True),
)
@@ -131,12 +132,12 @@ def main():
module = AnsibleAWSModule(argument_spec=argument_spec)
try:
- connection = module.client('sts')
+ connection = module.client("sts")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Failed to connect to AWS')
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
get_session_token(connection, module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
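
# Editor's note: get_session_token above only forwards the MFA arguments when they
# are set. A minimal boto3 sketch of the same call; the MFA device ARN and token
# code below are hypothetical:

    import boto3

    sts = boto3.client("sts")
    args = {"DurationSeconds": 3600}
    mfa_serial_number = "arn:aws:iam::123456789012:mfa/example"  # hypothetical device
    mfa_token = "123456"
    if mfa_serial_number and mfa_token:
        args["SerialNumber"] = mfa_serial_number
        args["TokenCode"] = mfa_token

    creds = sts.get_session_token(**args)["Credentials"]
    # Maps onto the module's access_key/secret_key/session_token return values.
    print(creds["AccessKeyId"], creds["SecretAccessKey"], creds["SessionToken"])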
diff --git a/ansible_collections/community/aws/plugins/modules/waf_condition.py b/ansible_collections/community/aws/plugins/modules/waf_condition.py
index 63585d50c..5b08cb6de 100644
--- a/ansible_collections/community/aws/plugins/modules/waf_condition.py
+++ b/ansible_collections/community/aws/plugins/modules/waf_condition.py
@@ -1,13 +1,11 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Will Thames
# Copyright (c) 2015 Mike Mochan
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: waf_condition
short_description: Create and delete WAF Conditions
version_added: 1.0.0
@@ -20,10 +18,6 @@ description:
author:
- Will Thames (@willthames)
- Mike Mochan (@mmochan)
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
options:
name:
@@ -137,77 +131,81 @@ options:
- absent
default: present
type: str
-'''
-EXAMPLES = r'''
- - name: create WAF byte condition
- community.aws.waf_condition:
- name: my_byte_condition
- filters:
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: create WAF byte condition
+ community.aws.waf_condition:
+ name: my_byte_condition
+ filters:
- field_to_match: header
position: STARTS_WITH
target_string: Hello
header: Content-type
- type: byte
-
- - name: create WAF geo condition
- community.aws.waf_condition:
- name: my_geo_condition
- filters:
- - country: US
- - country: AU
- - country: AT
- type: geo
-
- - name: create IP address condition
- community.aws.waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "10.0.0.0/8"
- - ip_address: "192.168.0.0/24"
- type: ip
-
- - name: create WAF regex condition
- community.aws.waf_condition:
- name: my_regex_condition
- filters:
- - field_to_match: query_string
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
-
- - name: create WAF size condition
- community.aws.waf_condition:
- name: my_size_condition
- filters:
- - field_to_match: query_string
- size: 300
- comparison: GT
- type: size
-
- - name: create WAF sql injection condition
- community.aws.waf_condition:
- name: my_sql_condition
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: sql
-
- - name: create WAF xss condition
- community.aws.waf_condition:
- name: my_xss_condition
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: xss
-
-'''
-
-RETURN = r'''
+ type: byte
+
+- name: create WAF geo condition
+ community.aws.waf_condition:
+ name: my_geo_condition
+ filters:
+ - country: US
+ - country: AU
+ - country: AT
+ type: geo
+
+- name: create IP address condition
+ community.aws.waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "10.0.0.0/8"
+ - ip_address: "192.168.0.0/24"
+ type: ip
+
+- name: create WAF regex condition
+ community.aws.waf_condition:
+ name: my_regex_condition
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+
+- name: create WAF size condition
+ community.aws.waf_condition:
+ name: my_size_condition
+ filters:
+ - field_to_match: query_string
+ size: 300
+ comparison: GT
+ type: size
+
+- name: create WAF sql injection condition
+ community.aws.waf_condition:
+ name: my_sql_condition
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: sql
+
+- name: create WAF xss condition
+ community.aws.waf_condition:
+ name: my_xss_condition
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: xss
+"""
+
+RETURN = r"""
condition:
description: Condition returned by operation.
returned: always
@@ -397,7 +395,7 @@ condition:
description: transformation applied to the text before matching.
type: str
sample: URL_DECODE
-'''
+"""
try:
import botocore
@@ -406,85 +404,92 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP
-from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff
from ansible_collections.amazon.aws.plugins.module_utils.waf import get_rule_with_backoff
from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff
from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
-class Condition(object):
+class Condition(object):
def __init__(self, client, module):
self.client = client
self.module = module
- self.type = module.params['type']
- self.method_suffix = MATCH_LOOKUP[self.type]['method']
- self.conditionset = MATCH_LOOKUP[self.type]['conditionset']
- self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's'
- self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id'
- self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple']
- self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's'
- self.conditiontype = MATCH_LOOKUP[self.type]['type']
+ self.type = module.params["type"]
+ self.method_suffix = MATCH_LOOKUP[self.type]["method"]
+ self.conditionset = MATCH_LOOKUP[self.type]["conditionset"]
+ self.conditionsets = MATCH_LOOKUP[self.type]["conditionset"] + "s"
+ self.conditionsetid = MATCH_LOOKUP[self.type]["conditionset"] + "Id"
+ self.conditiontuple = MATCH_LOOKUP[self.type]["conditiontuple"]
+ self.conditiontuples = MATCH_LOOKUP[self.type]["conditiontuple"] + "s"
+ self.conditiontype = MATCH_LOOKUP[self.type]["type"]
def format_for_update(self, condition_set_id):
# Prep kwargs
kwargs = dict()
- kwargs['Updates'] = list()
+ kwargs["Updates"] = list()
- for filtr in self.module.params.get('filters'):
+ for filtr in self.module.params.get("filters"):
# Only for ip_set
- if self.type == 'ip':
+ if self.type == "ip":
# there might be a better way of detecting an IPv6 address
- if ':' in filtr.get('ip_address'):
- ip_type = 'IPV6'
+ if ":" in filtr.get("ip_address"):
+ ip_type = "IPV6"
else:
- ip_type = 'IPV4'
- condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')}
+ ip_type = "IPV4"
+ condition_insert = {"Type": ip_type, "Value": filtr.get("ip_address")}
# Specific for geo_match_set
- if self.type == 'geo':
- condition_insert = dict(Type='Country', Value=filtr.get('country'))
+ if self.type == "geo":
+ condition_insert = dict(Type="Country", Value=filtr.get("country"))
# Common For everything but ip_set and geo_match_set
- if self.type not in ('ip', 'geo'):
-
- condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()),
- TextTransformation=filtr.get('transformation', 'none').upper())
-
- if filtr.get('field_to_match').upper() == "HEADER":
- if filtr.get('header'):
- condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower()
+ if self.type not in ("ip", "geo"):
+ condition_insert = dict(
+ FieldToMatch=dict(Type=filtr.get("field_to_match").upper()),
+ TextTransformation=filtr.get("transformation", "none").upper(),
+ )
+
+ if filtr.get("field_to_match").upper() == "HEADER":
+ if filtr.get("header"):
+ condition_insert["FieldToMatch"]["Data"] = filtr.get("header").lower()
else:
self.module.fail_json(msg="DATA required when HEADER requested")
# Specific for byte_match_set
- if self.type == 'byte':
- condition_insert['TargetString'] = filtr.get('target_string')
- condition_insert['PositionalConstraint'] = filtr.get('position')
+ if self.type == "byte":
+ condition_insert["TargetString"] = filtr.get("target_string")
+ condition_insert["PositionalConstraint"] = filtr.get("position")
# Specific for size_constraint_set
- if self.type == 'size':
- condition_insert['ComparisonOperator'] = filtr.get('comparison')
- condition_insert['Size'] = filtr.get('size')
+ if self.type == "size":
+ condition_insert["ComparisonOperator"] = filtr.get("comparison")
+ condition_insert["Size"] = filtr.get("size")
# Specific for regex_match_set
- if self.type == 'regex':
- condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId']
+ if self.type == "regex":
+ condition_insert["RegexPatternSetId"] = self.ensure_regex_pattern_present(filtr.get("regex_pattern"))[
+ "RegexPatternSetId"
+ ]
- kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert})
+ kwargs["Updates"].append({"Action": "INSERT", self.conditiontuple: condition_insert})
kwargs[self.conditionsetid] = condition_set_id
return kwargs
def format_for_deletion(self, condition):
- return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple}
- for current_condition_tuple in condition[self.conditiontuples]],
- self.conditionsetid: condition[self.conditionsetid]}
+ return {
+ "Updates": [
+ {"Action": "DELETE", self.conditiontuple: current_condition_tuple}
+ for current_condition_tuple in condition[self.conditiontuples]
+ ],
+ self.conditionsetid: condition[self.conditionsetid],
+ }
@AWSRetry.exponential_backoff()
def list_regex_patterns_with_backoff(self, **params):
@@ -502,60 +507,77 @@ class Condition(object):
try:
response = self.list_regex_patterns_with_backoff(**params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not list regex patterns')
- regex_patterns.extend(response['RegexPatternSets'])
- if 'NextMarker' in response:
- params['NextMarker'] = response['NextMarker']
+ self.module.fail_json_aws(e, msg="Could not list regex patterns")
+ regex_patterns.extend(response["RegexPatternSets"])
+ if "NextMarker" in response:
+ params["NextMarker"] = response["NextMarker"]
else:
break
return regex_patterns
def get_regex_pattern_by_name(self, name):
existing_regex_patterns = self.list_regex_patterns()
- regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns)
+ regex_lookup = dict((item["Name"], item["RegexPatternSetId"]) for item in existing_regex_patterns)
if name in regex_lookup:
- return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet']
+ return self.get_regex_pattern_set_with_backoff(regex_lookup[name])["RegexPatternSet"]
else:
return None
def ensure_regex_pattern_present(self, regex_pattern):
- name = regex_pattern['name']
+ name = regex_pattern["name"]
pattern_set = self.get_regex_pattern_by_name(name)
if not pattern_set:
- pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name},
- self.client.create_regex_pattern_set)['RegexPatternSet']
- missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings'])
- extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings'])
+ pattern_set = run_func_with_change_token_backoff(
+ self.client, self.module, {"Name": name}, self.client.create_regex_pattern_set
+ )["RegexPatternSet"]
+ missing = set(regex_pattern["regex_strings"]) - set(pattern_set["RegexPatternStrings"])
+ extra = set(pattern_set["RegexPatternStrings"]) - set(regex_pattern["regex_strings"])
if not missing and not extra:
return pattern_set
- updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing]
- updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra])
- run_func_with_change_token_backoff(self.client, self.module,
- {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates},
- self.client.update_regex_pattern_set, wait=True)
- return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet']
+ updates = [{"Action": "INSERT", "RegexPatternString": pattern} for pattern in missing]
+ updates.extend([{"Action": "DELETE", "RegexPatternString": pattern} for pattern in extra])
+ run_func_with_change_token_backoff(
+ self.client,
+ self.module,
+ {"RegexPatternSetId": pattern_set["RegexPatternSetId"], "Updates": updates},
+ self.client.update_regex_pattern_set,
+ wait=True,
+ )
+ return self.get_regex_pattern_set_with_backoff(pattern_set["RegexPatternSetId"])["RegexPatternSet"]
def delete_unused_regex_pattern(self, regex_pattern_set_id):
try:
- regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet']
+ regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)[
+ "RegexPatternSet"
+ ]
updates = list()
- for regex_pattern_string in regex_pattern_set['RegexPatternStrings']:
- updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string})
- run_func_with_change_token_backoff(self.client, self.module,
- {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates},
- self.client.update_regex_pattern_set)
-
- run_func_with_change_token_backoff(self.client, self.module,
- {'RegexPatternSetId': regex_pattern_set_id},
- self.client.delete_regex_pattern_set, wait=True)
- except is_boto3_error_code('WAFNonexistentItemException'):
+ for regex_pattern_string in regex_pattern_set["RegexPatternStrings"]:
+ updates.append({"Action": "DELETE", "RegexPatternString": regex_pattern_string})
+ run_func_with_change_token_backoff(
+ self.client,
+ self.module,
+ {"RegexPatternSetId": regex_pattern_set_id, "Updates": updates},
+ self.client.update_regex_pattern_set,
+ )
+
+ run_func_with_change_token_backoff(
+ self.client,
+ self.module,
+ {"RegexPatternSetId": regex_pattern_set_id},
+ self.client.delete_regex_pattern_set,
+ wait=True,
+ )
+ except is_boto3_error_code("WAFNonexistentItemException"):
return
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- self.module.fail_json_aws(e, msg='Could not delete regex pattern')
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ self.module.fail_json_aws(e, msg="Could not delete regex pattern")
def get_condition_by_name(self, name):
- all_conditions = [d for d in self.list_conditions() if d['Name'] == name]
+ all_conditions = [d for d in self.list_conditions() if d["Name"] == name]
if all_conditions:
return all_conditions[0][self.conditionsetid]
@@ -563,17 +585,17 @@ class Condition(object):
def get_condition_by_id_with_backoff(self, condition_set_id):
params = dict()
params[self.conditionsetid] = condition_set_id
- func = getattr(self.client, 'get_' + self.method_suffix)
+ func = getattr(self.client, "get_" + self.method_suffix)
return func(**params)[self.conditionset]
def get_condition_by_id(self, condition_set_id):
try:
return self.get_condition_by_id_with_backoff(condition_set_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not get condition')
+ self.module.fail_json_aws(e, msg="Could not get condition")
def list_conditions(self):
- method = 'list_' + self.method_suffix + 's'
+ method = "list_" + self.method_suffix + "s"
try:
paginator = self.client.get_paginator(method)
func = paginator.paginate().build_full_result
@@ -583,66 +605,68 @@ class Condition(object):
try:
return func()[self.conditionsets]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type)
+ self.module.fail_json_aws(e, msg=f"Could not list {self.type} conditions")
def tidy_up_regex_patterns(self, regex_match_set):
all_regex_match_sets = self.list_conditions()
all_match_set_patterns = list()
for rms in all_regex_match_sets:
- all_match_set_patterns.extend(conditiontuple['RegexPatternSetId']
- for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples])
+ all_match_set_patterns.extend(
+ conditiontuple["RegexPatternSetId"]
+ for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples]
+ )
for filtr in regex_match_set[self.conditiontuples]:
- if filtr['RegexPatternSetId'] not in all_match_set_patterns:
- self.delete_unused_regex_pattern(filtr['RegexPatternSetId'])
+ if filtr["RegexPatternSetId"] not in all_match_set_patterns:
+ self.delete_unused_regex_pattern(filtr["RegexPatternSetId"])
def find_condition_in_rules(self, condition_set_id):
rules_in_use = []
try:
- if self.client.__class__.__name__ == 'WAF':
+ if self.client.__class__.__name__ == "WAF":
all_rules = list_rules_with_backoff(self.client)
- elif self.client.__class__.__name__ == 'WAFRegional':
+ elif self.client.__class__.__name__ == "WAFRegional":
all_rules = list_regional_rules_with_backoff(self.client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not list rules')
+ self.module.fail_json_aws(e, msg="Could not list rules")
for rule in all_rules:
try:
- rule_details = get_rule_with_backoff(self.client, rule['RuleId'])
+ rule_details = get_rule_with_backoff(self.client, rule["RuleId"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not get rule details')
- if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]:
- rules_in_use.append(rule_details['Name'])
+ self.module.fail_json_aws(e, msg="Could not get rule details")
+ if condition_set_id in [predicate["DataId"] for predicate in rule_details["Predicates"]]:
+ rules_in_use.append(rule_details["Name"])
return rules_in_use
def find_and_delete_condition(self, condition_set_id):
current_condition = self.get_condition_by_id(condition_set_id)
in_use_rules = self.find_condition_in_rules(condition_set_id)
if in_use_rules:
- rulenames = ', '.join(in_use_rules)
- self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames))
+ rulenames = ", ".join(in_use_rules)
+ self.module.fail_json(msg=f"Condition {current_condition['Name']} is in use by {rulenames}")
if current_condition[self.conditiontuples]:
# Filters are deleted using update with the DELETE action
- func = getattr(self.client, 'update_' + self.method_suffix)
+ func = getattr(self.client, "update_" + self.method_suffix)
params = self.format_for_deletion(current_condition)
try:
# We do not need to wait for the conditiontuple delete because we wait later for the delete_* call
run_func_with_change_token_backoff(self.client, self.module, params, func)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not delete filters from condition')
- func = getattr(self.client, 'delete_' + self.method_suffix)
+ self.module.fail_json_aws(e, msg="Could not delete filters from condition")
+ func = getattr(self.client, "delete_" + self.method_suffix)
params = dict()
params[self.conditionsetid] = condition_set_id
try:
run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not delete condition')
+ self.module.fail_json_aws(e, msg="Could not delete condition")
# tidy up regex patterns
- if self.type == 'regex':
+ if self.type == "regex":
self.tidy_up_regex_patterns(current_condition)
return True, {}
def find_missing(self, update, current_condition):
missing = []
- for desired in update['Updates']:
+ for desired in update["Updates"]:
found = False
desired_condition = desired[self.conditiontuple]
current_conditions = current_condition[self.conditiontuples]
@@ -657,39 +681,41 @@ class Condition(object):
current_condition = self.get_condition_by_id(condition_set_id)
update = self.format_for_update(condition_set_id)
missing = self.find_missing(update, current_condition)
- if self.module.params.get('purge_filters'):
- extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple}
- for current_tuple in current_condition[self.conditiontuples]
- if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]]
+ if self.module.params.get("purge_filters"):
+ extra = [
+ {"Action": "DELETE", self.conditiontuple: current_tuple}
+ for current_tuple in current_condition[self.conditiontuples]
+ if current_tuple not in [desired[self.conditiontuple] for desired in update["Updates"]]
+ ]
else:
extra = []
changed = bool(missing or extra)
if changed:
- update['Updates'] = missing + extra
- func = getattr(self.client, 'update_' + self.method_suffix)
+ update["Updates"] = missing + extra
+ func = getattr(self.client, "update_" + self.method_suffix)
try:
result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not update condition')
+ self.module.fail_json_aws(e, msg="Could not update condition")
return changed, self.get_condition_by_id(condition_set_id)
def ensure_condition_present(self):
- name = self.module.params['name']
+ name = self.module.params["name"]
condition_set_id = self.get_condition_by_name(name)
if condition_set_id:
return self.find_and_update_condition(condition_set_id)
else:
params = dict()
- params['Name'] = name
- func = getattr(self.client, 'create_' + self.method_suffix)
+ params["Name"] = name
+ func = getattr(self.client, "create_" + self.method_suffix)
try:
condition = run_func_with_change_token_backoff(self.client, self.module, params, func)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not create condition')
+ self.module.fail_json_aws(e, msg="Could not create condition")
return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid])
def ensure_condition_absent(self):
- condition_set_id = self.get_condition_by_name(self.module.params['name'])
+ condition_set_id = self.get_condition_by_name(self.module.params["name"])
if condition_set_id:
return self.find_and_delete_condition(condition_set_id)
return False, {}
@@ -698,45 +724,46 @@ class Condition(object):
def main():
filters_subspec = dict(
country=dict(),
- field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']),
+ field_to_match=dict(choices=["uri", "query_string", "header", "method", "body"]),
header=dict(),
- transformation=dict(choices=['none', 'compress_white_space',
- 'html_entity_decode', 'lowercase',
- 'cmd_line', 'url_decode']),
- position=dict(choices=['exactly', 'starts_with', 'ends_with',
- 'contains', 'contains_word']),
- comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']),
+ transformation=dict(
+ choices=["none", "compress_white_space", "html_entity_decode", "lowercase", "cmd_line", "url_decode"]
+ ),
+ position=dict(choices=["exactly", "starts_with", "ends_with", "contains", "contains_word"]),
+ comparison=dict(choices=["EQ", "NE", "LE", "LT", "GE", "GT"]),
target_string=dict(), # Bytes
- size=dict(type='int'),
+ size=dict(type="int"),
ip_address=dict(),
regex_pattern=dict(),
)
argument_spec = dict(
name=dict(required=True),
- type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']),
- filters=dict(type='list', elements='dict'),
- purge_filters=dict(type='bool', default=False),
- waf_regional=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent']),
+ type=dict(required=True, choices=["byte", "geo", "ip", "regex", "size", "sql", "xss"]),
+ filters=dict(type="list", elements="dict"),
+ purge_filters=dict(type="bool", default=False),
+ waf_regional=dict(type="bool", default=False),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[["state", "present", ["filters"]]],
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ['filters']]])
- state = module.params.get('state')
+ state = module.params.get("state")
- resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ resource = "waf" if not module.params["waf_regional"] else "waf-regional"
client = module.client(resource)
condition = Condition(client, module)
- if state == 'present':
+ if state == "present":
(changed, results) = condition.ensure_condition_present()
# return a condition agnostic ID for use by waf_rule
- results['ConditionId'] = results[condition.conditionsetid]
+ results["ConditionId"] = results[condition.conditionsetid]
else:
(changed, results) = condition.ensure_condition_absent()
module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
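
# Editor's note: as the inline comment in format_for_update concedes, detecting IPv6
# by searching for ':' is a heuristic. A more robust alternative (a sketch only, not
# what the module does) is the standard-library ipaddress module, which also
# validates the CIDR values the filters accept:

    import ipaddress

    def waf_ip_type(value):
        # Raises ValueError for malformed input instead of silently misclassifying it.
        network = ipaddress.ip_network(value, strict=False)
        return "IPV6" if network.version == 6 else "IPV4"

    assert waf_ip_type("10.0.0.0/8") == "IPV4"
    assert waf_ip_type("2001:db8::/32") == "IPV6"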
diff --git a/ansible_collections/community/aws/plugins/modules/waf_info.py b/ansible_collections/community/aws/plugins/modules/waf_info.py
index 6a49a886e..711d1d8de 100644
--- a/ansible_collections/community/aws/plugins/modules/waf_info.py
+++ b/ansible_collections/community/aws/plugins/modules/waf_info.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
module: waf_info
short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters
version_added: 1.0.0
@@ -29,12 +27,12 @@ author:
- Mike Mochan (@mmochan)
- Will Thames (@willthames)
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: obtain all WAF information
community.aws.waf_info:
@@ -46,9 +44,9 @@ EXAMPLES = '''
community.aws.waf_info:
name: test_waf
waf_regional: true
-'''
+"""
-RETURN = '''
+RETURN = r"""
wafs:
description: The WAFs that match the passed arguments.
returned: success
@@ -114,31 +112,31 @@ wafs:
"type": "ByteMatch"
}
]
-'''
+"""
+
+from ansible_collections.amazon.aws.plugins.module_utils.waf import get_web_acl
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls, get_web_acl
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def main():
argument_spec = dict(
name=dict(required=False),
- waf_regional=dict(type='bool', default=False)
+ waf_regional=dict(type="bool", default=False),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ resource = "waf" if not module.params["waf_regional"] else "waf-regional"
client = module.client(resource)
web_acls = list_web_acls(client, module)
- name = module.params['name']
+ name = module.params["name"]
if name:
- web_acls = [web_acl for web_acl in web_acls if
- web_acl['Name'] == name]
+ web_acls = [web_acl for web_acl in web_acls if web_acl["Name"] == name]
if not web_acls:
- module.fail_json(msg="WAF named %s not found" % name)
- module.exit_json(wafs=[get_web_acl(client, module, web_acl['WebACLId'])
- for web_acl in web_acls])
+ module.fail_json(msg=f"WAF named {name} not found")
+ module.exit_json(wafs=[get_web_acl(client, module, web_acl["WebACLId"]) for web_acl in web_acls])
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
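
# Editor's note: both waf_condition above and waf_rule below drive the classic WAF
# API through method names assembled from MATCH_LOOKUP and resolved with getattr.
# A minimal sketch of that dispatch pattern, assuming a boto3 WAF client and the
# MATCH_LOOKUP shape from amazon.aws's waf module_utils (for example,
# MATCH_LOOKUP["byte"]["method"] == "byte_match_set"):

    def list_condition_sets(client, match_lookup, condition_type):
        # e.g. condition_type "byte" resolves to client.list_byte_match_sets().
        method_name = "list_" + match_lookup[condition_type]["method"] + "s"
        response = getattr(client, method_name)()
        return response[match_lookup[condition_type]["conditionset"] + "s"]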
diff --git a/ansible_collections/community/aws/plugins/modules/waf_rule.py b/ansible_collections/community/aws/plugins/modules/waf_rule.py
index a994b1831..87a02bbbd 100644
--- a/ansible_collections/community/aws/plugins/modules/waf_rule.py
+++ b/ansible_collections/community/aws/plugins/modules/waf_rule.py
@@ -1,13 +1,11 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Will Thames
# Copyright (c) 2015 Mike Mochan
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: waf_rule
short_description: Create and delete WAF Rules
version_added: 1.0.0
@@ -20,10 +18,6 @@ description:
author:
- Mike Mochan (@mmochan)
- Will Thames (@willthames)
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
options:
name:
@@ -71,30 +65,35 @@ options:
default: false
required: false
type: bool
-'''
-
-EXAMPLES = r'''
- - name: create WAF rule
- community.aws.waf_rule:
- name: my_waf_rule
- conditions:
- - name: my_regex_condition
- type: regex
- negated: false
- - name: my_geo_condition
- type: geo
- negated: false
- - name: my_byte_condition
- type: byte
- negated: true
-
- - name: remove WAF rule
- community.aws.waf_rule:
- name: "my_waf_rule"
- state: absent
-'''
-
-RETURN = r'''
+
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: create WAF rule
+ community.aws.waf_rule:
+ name: my_waf_rule
+ conditions:
+ - name: my_regex_condition
+ type: regex
+ negated: false
+ - name: my_geo_condition
+ type: geo
+ negated: false
+ - name: my_byte_condition
+ type: byte
+ negated: true
+
+- name: remove WAF rule
+ community.aws.waf_rule:
+ name: "my_waf_rule"
+ state: absent
+"""
+
+RETURN = r"""
rule:
description: WAF rule contents
returned: always
@@ -135,7 +134,7 @@ rule:
returned: always
type: str
sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261
-'''
+"""
import re
@@ -144,62 +143,62 @@ try:
except ImportError:
pass # handled by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.waf import (
- MATCH_LOOKUP,
- list_regional_rules_with_backoff,
- list_rules_with_backoff,
- run_func_with_change_token_backoff,
- get_web_acl_with_backoff,
- list_web_acls_with_backoff,
- list_regional_web_acls_with_backoff,
-)
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.waf import MATCH_LOOKUP
+from ansible_collections.amazon.aws.plugins.module_utils.waf import get_web_acl_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_web_acls_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_rule_by_name(client, module, name):
- rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name]
+ rules = [d["RuleId"] for d in list_rules(client, module) if d["Name"] == name]
if rules:
return rules[0]
def get_rule(client, module, rule_id):
try:
- return client.get_rule(RuleId=rule_id)['Rule']
+ return client.get_rule(RuleId=rule_id)["Rule"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get WAF rule')
+ module.fail_json_aws(e, msg="Could not get WAF rule")
def list_rules(client, module):
- if client.__class__.__name__ == 'WAF':
+ if client.__class__.__name__ == "WAF":
try:
return list_rules_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list WAF rules')
- elif client.__class__.__name__ == 'WAFRegional':
+ module.fail_json_aws(e, msg="Could not list WAF rules")
+ elif client.__class__.__name__ == "WAFRegional":
try:
return list_regional_rules_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list WAF Regional rules')
+ module.fail_json_aws(e, msg="Could not list WAF Regional rules")
def list_regional_rules(client, module):
try:
return list_regional_rules_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list WAF rules')
+ module.fail_json_aws(e, msg="Could not list WAF rules")
def find_and_update_rule(client, module, rule_id):
rule = get_rule(client, module, rule_id)
- rule_id = rule['RuleId']
+ rule_id = rule["RuleId"]
existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
all_conditions = dict()
for condition_type in MATCH_LOOKUP:
- method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's'
+ method = "list_" + MATCH_LOOKUP[condition_type]["method"] + "s"
all_conditions[condition_type] = dict()
try:
paginator = client.get_paginator(method)
@@ -209,125 +208,133 @@ def find_and_update_rule(client, module, rule_id):
# and throw different exceptions
func = getattr(client, method)
try:
- pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's']
+ pred_results = func()[MATCH_LOOKUP[condition_type]["conditionset"] + "s"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type)
+ module.fail_json_aws(e, msg=f"Could not list {condition_type} conditions")
for pred in pred_results:
- pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id']
- all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred)
- all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred)
+ pred["DataId"] = pred[MATCH_LOOKUP[condition_type]["conditionset"] + "Id"]
+ all_conditions[condition_type][pred["Name"]] = camel_dict_to_snake_dict(pred)
+ all_conditions[condition_type][pred["DataId"]] = camel_dict_to_snake_dict(pred)
- for condition in module.params['conditions']:
- desired_conditions[condition['type']][condition['name']] = condition
+ for condition in module.params["conditions"]:
+ desired_conditions[condition["type"]][condition["name"]] = condition
- reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items())
- for condition in rule['Predicates']:
- existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition)
+ reverse_condition_types = dict((v["type"], k) for (k, v) in MATCH_LOOKUP.items())
+ for condition in rule["Predicates"]:
+ existing_conditions[reverse_condition_types[condition["Type"]]][condition["DataId"]] = camel_dict_to_snake_dict(
+ condition
+ )
insertions = list()
deletions = list()
for condition_type in desired_conditions:
- for (condition_name, condition) in desired_conditions[condition_type].items():
+ for condition_name, condition in desired_conditions[condition_type].items():
if condition_name not in all_conditions[condition_type]:
- module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type))
- condition['data_id'] = all_conditions[condition_type][condition_name]['data_id']
- if condition['data_id'] not in existing_conditions[condition_type]:
+ module.fail_json(msg=f"Condition {condition_name} of type {condition_type} does not exist")
+ condition["data_id"] = all_conditions[condition_type][condition_name]["data_id"]
+ if condition["data_id"] not in existing_conditions[condition_type]:
insertions.append(format_for_insertion(condition))
- if module.params['purge_conditions']:
+ if module.params["purge_conditions"]:
for condition_type in existing_conditions:
- deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values()
- if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]])
+ deletions.extend(
+ [
+ format_for_deletion(condition)
+ for condition in existing_conditions[condition_type].values()
+ if not all_conditions[condition_type][condition["data_id"]]["name"]
+ in desired_conditions[condition_type]
+ ]
+ )
changed = bool(insertions or deletions)
- update = {
- 'RuleId': rule_id,
- 'Updates': insertions + deletions
- }
+ update = {"RuleId": rule_id, "Updates": insertions + deletions}
if changed:
try:
run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not update rule conditions')
+ module.fail_json_aws(e, msg="Could not update rule conditions")
return changed, get_rule(client, module, rule_id)
def format_for_insertion(condition):
- return dict(Action='INSERT',
- Predicate=dict(Negated=condition['negated'],
- Type=MATCH_LOOKUP[condition['type']]['type'],
- DataId=condition['data_id']))
+ return dict(
+ Action="INSERT",
+ Predicate=dict(
+ Negated=condition["negated"], Type=MATCH_LOOKUP[condition["type"]]["type"], DataId=condition["data_id"]
+ ),
+ )
def format_for_deletion(condition):
- return dict(Action='DELETE',
- Predicate=dict(Negated=condition['negated'],
- Type=condition['type'],
- DataId=condition['data_id']))
+ return dict(
+ Action="DELETE",
+ Predicate=dict(Negated=condition["negated"], Type=condition["type"], DataId=condition["data_id"]),
+ )
def remove_rule_conditions(client, module, rule_id):
- conditions = get_rule(client, module, rule_id)['Predicates']
+ conditions = get_rule(client, module, rule_id)["Predicates"]
updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions]
try:
- run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule)
+ run_func_with_change_token_backoff(client, module, {"RuleId": rule_id, "Updates": updates}, client.update_rule)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not remove rule conditions')
+ module.fail_json_aws(e, msg="Could not remove rule conditions")
def ensure_rule_present(client, module):
- name = module.params['name']
+ name = module.params["name"]
rule_id = get_rule_by_name(client, module, name)
params = dict()
if rule_id:
return find_and_update_rule(client, module, rule_id)
else:
- params['Name'] = module.params['name']
- metric_name = module.params['metric_name']
+ params["Name"] = module.params["name"]
+ metric_name = module.params["metric_name"]
if not metric_name:
- metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name'])
- params['MetricName'] = metric_name
+ metric_name = re.sub(r"[^a-zA-Z0-9]", "", module.params["name"])
+ params["MetricName"] = metric_name
try:
- new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule']
+ new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)["Rule"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not create rule')
- return find_and_update_rule(client, module, new_rule['RuleId'])
+ module.fail_json_aws(e, msg="Could not create rule")
+ return find_and_update_rule(client, module, new_rule["RuleId"])
def find_rule_in_web_acls(client, module, rule_id):
web_acls_in_use = []
try:
- if client.__class__.__name__ == 'WAF':
+ if client.__class__.__name__ == "WAF":
all_web_acls = list_web_acls_with_backoff(client)
- elif client.__class__.__name__ == 'WAFRegional':
+ elif client.__class__.__name__ == "WAFRegional":
all_web_acls = list_regional_web_acls_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list Web ACLs')
+ module.fail_json_aws(e, msg="Could not list Web ACLs")
for web_acl in all_web_acls:
try:
- web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId'])
+ web_acl_details = get_web_acl_with_backoff(client, web_acl["WebACLId"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get Web ACL details')
- if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]:
- web_acls_in_use.append(web_acl_details['Name'])
+ module.fail_json_aws(e, msg="Could not get Web ACL details")
+ if rule_id in [rule["RuleId"] for rule in web_acl_details["Rules"]]:
+ web_acls_in_use.append(web_acl_details["Name"])
return web_acls_in_use
def ensure_rule_absent(client, module):
- rule_id = get_rule_by_name(client, module, module.params['name'])
+ rule_id = get_rule_by_name(client, module, module.params["name"])
in_use_web_acls = find_rule_in_web_acls(client, module, rule_id)
if in_use_web_acls:
- web_acl_names = ', '.join(in_use_web_acls)
- module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" %
- (module.params['name'], web_acl_names))
+ web_acl_names = ", ".join(in_use_web_acls)
+ module.fail_json(msg=f"Rule {module.params['name']} is in use by Web ACL(s) {web_acl_names}")
if rule_id:
remove_rule_conditions(client, module, rule_id)
try:
- return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True)
+ return True, run_func_with_change_token_backoff(
+ client, module, {"RuleId": rule_id}, client.delete_rule, wait=True
+ )
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not delete rule')
+ module.fail_json_aws(e, msg="Could not delete rule")
return False, {}
@@ -335,17 +342,17 @@ def main():
argument_spec = dict(
name=dict(required=True),
metric_name=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- conditions=dict(type='list', elements='dict'),
- purge_conditions=dict(type='bool', default=False),
- waf_regional=dict(type='bool', default=False),
+ state=dict(default="present", choices=["present", "absent"]),
+ conditions=dict(type="list", elements="dict"),
+ purge_conditions=dict(type="bool", default=False),
+ waf_regional=dict(type="bool", default=False),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
- state = module.params.get('state')
+ state = module.params.get("state")
- resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ resource = "waf" if not module.params["waf_regional"] else "waf-regional"
client = module.client(resource)
- if state == 'present':
+ if state == "present":
(changed, results) = ensure_rule_present(client, module)
else:
(changed, results) = ensure_rule_absent(client, module)
@@ -353,5 +360,5 @@ def main():
module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
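
The rewritten waf_rule module plans its update_rule payload by diffing the desired conditions against the predicates already attached to the rule: desired conditions missing from the rule become INSERT updates, and, when purge_conditions is set, attached predicates that are no longer desired become DELETE updates. A minimal standalone sketch of that diff; the condition types and data IDs below are illustrative sample data, not values from the module:

def plan_updates(desired, existing):
    # Mirrors format_for_insertion / format_for_deletion: each update wraps a
    # predicate dict under an Action key, and both kinds go into one batch.
    existing_ids = {c["data_id"] for c in existing}
    desired_ids = {c["data_id"] for c in desired}
    updates = [
        {"Action": "INSERT",
         "Predicate": {"Negated": c["negated"], "Type": c["type"], "DataId": c["data_id"]}}
        for c in desired
        if c["data_id"] not in existing_ids
    ]
    updates += [
        {"Action": "DELETE",
         "Predicate": {"Negated": c["negated"], "Type": c["type"], "DataId": c["data_id"]}}
        for c in existing
        if c["data_id"] not in desired_ids
    ]
    return updates

desired = [{"negated": False, "type": "RegexMatch", "data_id": "regex-1"}]
existing = [{"negated": True, "type": "ByteMatch", "data_id": "byte-9"}]
print(plan_updates(desired, existing))
# one INSERT for regex-1, one DELETE for byte-9
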
diff --git a/ansible_collections/community/aws/plugins/modules/waf_web_acl.py b/ansible_collections/community/aws/plugins/modules/waf_web_acl.py
index 9d5ad59e4..021ca568d 100644
--- a/ansible_collections/community/aws/plugins/modules/waf_web_acl.py
+++ b/ansible_collections/community/aws/plugins/modules/waf_web_acl.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: waf_web_acl
short_description: Create and delete WAF Web ACLs
version_added: 1.0.0
@@ -19,10 +17,6 @@ description:
author:
- Mike Mochan (@mmochan)
- Will Thames (@willthames)
-extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
options:
name:
@@ -85,27 +79,32 @@ options:
default: false
required: false
type: bool
-'''
-
-EXAMPLES = r'''
- - name: create web ACL
- community.aws.waf_web_acl:
- name: my_web_acl
- rules:
- - name: my_rule
- priority: 1
- action: block
- default_action: block
- purge_rules: true
- state: present
-
- - name: delete the web acl
- community.aws.waf_web_acl:
- name: my_web_acl
- state: absent
-'''
-
-RETURN = r'''
+
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: create web ACL
+ community.aws.waf_web_acl:
+ name: my_web_acl
+ rules:
+ - name: my_rule
+ priority: 1
+ action: block
+ default_action: block
+ purge_rules: true
+ state: present
+
+- name: delete the web acl
+ community.aws.waf_web_acl:
+ name: my_web_acl
+ state: absent
+"""
+
+RETURN = r"""
web_acl:
description: contents of the Web ACL.
returned: always
@@ -158,29 +157,29 @@ web_acl:
returned: always
type: str
sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c
-'''
+"""
+
+import re
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
-import re
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_rules_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_regional_web_acls_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_rules_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import list_web_acls_with_backoff
+from ansible_collections.amazon.aws.plugins.module_utils.waf import run_func_with_change_token_backoff
from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.waf import (
- list_regional_rules_with_backoff,
- list_regional_web_acls_with_backoff,
- list_rules_with_backoff,
- list_web_acls_with_backoff,
- run_func_with_change_token_backoff,
-)
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_web_acl_by_name(client, module, name):
- acls = [d['WebACLId'] for d in list_web_acls(client, module) if d['Name'] == name]
+ acls = [d["WebACLId"] for d in list_web_acls(client, module) if d["Name"] == name]
if acls:
return acls[0]
else:
@@ -188,91 +187,93 @@ def get_web_acl_by_name(client, module, name):
def create_rule_lookup(client, module):
- if client.__class__.__name__ == 'WAF':
+ if client.__class__.__name__ == "WAF":
try:
rules = list_rules_with_backoff(client)
- return dict((rule['Name'], rule) for rule in rules)
+ return dict((rule["Name"], rule) for rule in rules)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list rules')
- elif client.__class__.__name__ == 'WAFRegional':
+ module.fail_json_aws(e, msg="Could not list rules")
+ elif client.__class__.__name__ == "WAFRegional":
try:
rules = list_regional_rules_with_backoff(client)
- return dict((rule['Name'], rule) for rule in rules)
+ return dict((rule["Name"], rule) for rule in rules)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list regional rules')
+ module.fail_json_aws(e, msg="Could not list regional rules")
def get_web_acl(client, module, web_acl_id):
try:
- return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
+ return client.get_web_acl(WebACLId=web_acl_id)["WebACL"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id)
+ module.fail_json_aws(e, msg=f"Could not get Web ACL with id {web_acl_id}")
-def list_web_acls(client, module,):
- if client.__class__.__name__ == 'WAF':
+def list_web_acls(
+ client,
+ module,
+):
+ if client.__class__.__name__ == "WAF":
try:
return list_web_acls_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get Web ACLs')
- elif client.__class__.__name__ == 'WAFRegional':
+ module.fail_json_aws(e, msg="Could not get Web ACLs")
+ elif client.__class__.__name__ == "WAFRegional":
try:
return list_regional_web_acls_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get Web ACLs')
+ module.fail_json_aws(e, msg="Could not get Web ACLs")
def find_and_update_web_acl(client, module, web_acl_id):
acl = get_web_acl(client, module, web_acl_id)
rule_lookup = create_rule_lookup(client, module)
- existing_rules = acl['Rules']
- desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'],
- 'Priority': rule['priority'],
- 'Action': {'Type': rule['action'].upper()},
- 'Type': rule.get('type', 'regular').upper()}
- for rule in module.params['rules']]
+ existing_rules = acl["Rules"]
+ desired_rules = [
+ {
+ "RuleId": rule_lookup[rule["name"]]["RuleId"],
+ "Priority": rule["priority"],
+ "Action": {"Type": rule["action"].upper()},
+ "Type": rule.get("type", "regular").upper(),
+ }
+ for rule in module.params["rules"]
+ ]
missing = [rule for rule in desired_rules if rule not in existing_rules]
extras = []
- if module.params['purge_rules']:
+ if module.params["purge_rules"]:
extras = [rule for rule in existing_rules if rule not in desired_rules]
- insertions = [format_for_update(rule, 'INSERT') for rule in missing]
- deletions = [format_for_update(rule, 'DELETE') for rule in extras]
+ insertions = [format_for_update(rule, "INSERT") for rule in missing]
+ deletions = [format_for_update(rule, "DELETE") for rule in extras]
changed = bool(insertions + deletions)
# Purge rules before adding new ones in case a deletion shares the same
# priority as an insertion.
- params = {
- 'WebACLId': acl['WebACLId'],
- 'DefaultAction': acl['DefaultAction']
- }
+ params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"]}
change_tokens = []
if deletions:
try:
- params['Updates'] = deletions
+ params["Updates"] = deletions
result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
- change_tokens.append(result['ChangeToken'])
+ change_tokens.append(result["ChangeToken"])
get_waiter(
- client, 'change_token_in_sync',
- ).wait(
- ChangeToken=result['ChangeToken']
- )
+ client,
+ "change_token_in_sync",
+ ).wait(ChangeToken=result["ChangeToken"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not update Web ACL')
+ module.fail_json_aws(e, msg="Could not update Web ACL")
if insertions:
try:
- params['Updates'] = insertions
+ params["Updates"] = insertions
result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
- change_tokens.append(result['ChangeToken'])
+ change_tokens.append(result["ChangeToken"])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not update Web ACL')
+ module.fail_json_aws(e, msg="Could not update Web ACL")
if change_tokens:
for token in change_tokens:
get_waiter(
- client, 'change_token_in_sync',
- ).wait(
- ChangeToken=token
- )
+ client,
+ "change_token_in_sync",
+ ).wait(ChangeToken=token)
if changed:
acl = get_web_acl(client, module, web_acl_id)
return changed, acl
@@ -282,77 +283,79 @@ def format_for_update(rule, action):
return dict(
Action=action,
ActivatedRule=dict(
- Priority=rule['Priority'],
- RuleId=rule['RuleId'],
- Action=dict(
- Type=rule['Action']['Type']
- )
- )
+ Priority=rule["Priority"],
+ RuleId=rule["RuleId"],
+ Action=dict(Type=rule["Action"]["Type"]),
+ ),
)
def remove_rules_from_web_acl(client, module, web_acl_id):
acl = get_web_acl(client, module, web_acl_id)
- deletions = [format_for_update(rule, 'DELETE') for rule in acl['Rules']]
+ deletions = [format_for_update(rule, "DELETE") for rule in acl["Rules"]]
try:
- params = {'WebACLId': acl['WebACLId'], 'DefaultAction': acl['DefaultAction'], 'Updates': deletions}
+ params = {"WebACLId": acl["WebACLId"], "DefaultAction": acl["DefaultAction"], "Updates": deletions}
run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not remove rule')
+ module.fail_json_aws(e, msg="Could not remove rule")
def ensure_web_acl_present(client, module):
changed = False
result = None
- name = module.params['name']
+ name = module.params["name"]
web_acl_id = get_web_acl_by_name(client, module, name)
if web_acl_id:
(changed, result) = find_and_update_web_acl(client, module, web_acl_id)
else:
- metric_name = module.params['metric_name']
+ metric_name = module.params["metric_name"]
if not metric_name:
- metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name'])
- default_action = module.params['default_action'].upper()
+ metric_name = re.sub(r"[^A-Za-z0-9]", "", module.params["name"])
+ default_action = module.params["default_action"].upper()
try:
- params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}}
+ params = {"Name": name, "MetricName": metric_name, "DefaultAction": {"Type": default_action}}
new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not create Web ACL')
- (changed, result) = find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId'])
+ module.fail_json_aws(e, msg="Could not create Web ACL")
+ (changed, result) = find_and_update_web_acl(client, module, new_web_acl["WebACL"]["WebACLId"])
return changed, result
def ensure_web_acl_absent(client, module):
- web_acl_id = get_web_acl_by_name(client, module, module.params['name'])
+ web_acl_id = get_web_acl_by_name(client, module, module.params["name"])
if web_acl_id:
web_acl = get_web_acl(client, module, web_acl_id)
- if web_acl['Rules']:
+ if web_acl["Rules"]:
remove_rules_from_web_acl(client, module, web_acl_id)
try:
- run_func_with_change_token_backoff(client, module, {'WebACLId': web_acl_id}, client.delete_web_acl, wait=True)
+ run_func_with_change_token_backoff(
+ client, module, {"WebACLId": web_acl_id}, client.delete_web_acl, wait=True
+ )
return True, {}
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not delete Web ACL')
+ module.fail_json_aws(e, msg="Could not delete Web ACL")
return False, {}
def main():
argument_spec = dict(
name=dict(required=True),
- default_action=dict(choices=['block', 'allow', 'count']),
+ default_action=dict(choices=["block", "allow", "count"]),
metric_name=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- rules=dict(type='list', elements='dict'),
- purge_rules=dict(type='bool', default=False),
- waf_regional=dict(type='bool', default=False)
+ state=dict(default="present", choices=["present", "absent"]),
+ rules=dict(type="list", elements="dict"),
+ purge_rules=dict(type="bool", default=False),
+ waf_regional=dict(type="bool", default=False),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[["state", "present", ["default_action", "rules"]]],
)
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ['default_action', 'rules']]])
- state = module.params.get('state')
+ state = module.params.get("state")
- resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
+ resource = "waf" if not module.params["waf_regional"] else "waf-regional"
client = module.client(resource)
- if state == 'present':
+ if state == "present":
(changed, results) = ensure_web_acl_present(client, module)
else:
(changed, results) = ensure_web_acl_absent(client, module)
@@ -360,5 +363,5 @@ def main():
module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
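
As the comment in find_and_update_web_acl notes, deletions are sent in their own update_web_acl call and waited on before insertions, because a purged rule may share a priority with a newly inserted one. A toy illustration of the collision case (the rule IDs and the single shared priority are made up):

existing = [{"RuleId": "old-rule", "Priority": 1, "Action": {"Type": "BLOCK"}, "Type": "REGULAR"}]
desired = [{"RuleId": "new-rule", "Priority": 1, "Action": {"Type": "ALLOW"}, "Type": "REGULAR"}]

def format_for_update(rule, action):
    return {"Action": action,
            "ActivatedRule": {"Priority": rule["Priority"],
                              "RuleId": rule["RuleId"],
                              "Action": {"Type": rule["Action"]["Type"]}}}

deletions = [format_for_update(r, "DELETE") for r in existing if r not in desired]
insertions = [format_for_update(r, "INSERT") for r in desired if r not in existing]

# Both batches target priority 1. Applying deletions first (and waiting on the
# change token) frees the priority before new-rule claims it.
print(deletions)
print(insertions)
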
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py
index 7a9011e9b..b96ba0cb1 100644
--- a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: wafv2_ip_set
version_added: 1.5.0
@@ -63,14 +62,13 @@ notes:
- Support for I(purge_tags) was added in release 4.0.0.
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
- - amazon.aws.boto3
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
+ - amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: test ip set
wafv2_ip_set:
name: test02
@@ -84,9 +82,9 @@ EXAMPLES = '''
tags:
A: B
C: D
-'''
+"""
-RETURN = """
+RETURN = r"""
addresses:
description: Current addresses of the ip set
sample:
@@ -117,13 +115,16 @@ name:
"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags
@@ -137,41 +138,36 @@ class IpSet:
self.existing_set, self.id, self.locktoken, self.arn = self.get_set()
def description(self):
- return self.existing_set.get('Description')
+ return self.existing_set.get("Description")
def _format_set(self, ip_set):
if ip_set is None:
return None
- return camel_dict_to_snake_dict(self.existing_set, ignore_list=['tags'])
+ return camel_dict_to_snake_dict(self.existing_set, ignore_list=["tags"])
def get(self):
return self._format_set(self.existing_set)
def remove(self):
try:
- response = self.wafv2.delete_ip_set(
- Name=self.name,
- Scope=self.scope,
- Id=self.id,
- LockToken=self.locktoken
- )
+ response = self.wafv2.delete_ip_set(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken)
except (BotoCoreError, ClientError) as e:
self.fail_json_aws(e, msg="Failed to remove wafv2 ip set.")
return {}
def create(self, description, ip_address_version, addresses, tags):
req_obj = {
- 'Name': self.name,
- 'Scope': self.scope,
- 'IPAddressVersion': ip_address_version,
- 'Addresses': addresses,
+ "Name": self.name,
+ "Scope": self.scope,
+ "IPAddressVersion": ip_address_version,
+ "Addresses": addresses,
}
if description:
- req_obj['Description'] = description
+ req_obj["Description"] = description
if tags:
- req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+ req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags)
try:
response = self.wafv2.create_ip_set(**req_obj)
@@ -183,15 +179,15 @@ class IpSet:
def update(self, description, addresses):
req_obj = {
- 'Name': self.name,
- 'Scope': self.scope,
- 'Id': self.id,
- 'Addresses': addresses,
- 'LockToken': self.locktoken
+ "Name": self.name,
+ "Scope": self.scope,
+ "Id": self.id,
+ "Addresses": addresses,
+ "LockToken": self.locktoken,
}
if description:
- req_obj['Description'] = description
+ req_obj["Description"] = description
try:
response = self.wafv2.update_ip_set(**req_obj)
@@ -207,38 +203,31 @@ class IpSet:
id = None
arn = None
locktoken = None
- for item in response.get('IPSets'):
- if item.get('Name') == self.name:
- id = item.get('Id')
- locktoken = item.get('LockToken')
- arn = item.get('ARN')
+ for item in response.get("IPSets"):
+ if item.get("Name") == self.name:
+ id = item.get("Id")
+ locktoken = item.get("LockToken")
+ arn = item.get("ARN")
if id:
try:
- existing_set = self.wafv2.get_ip_set(
- Name=self.name,
- Scope=self.scope,
- Id=id
- ).get('IPSet')
+ existing_set = self.wafv2.get_ip_set(Name=self.name, Scope=self.scope, Id=id).get("IPSet")
except (BotoCoreError, ClientError) as e:
self.fail_json_aws(e, msg="Failed to get wafv2 ip set.")
tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws)
- existing_set['tags'] = tags
+ existing_set["tags"] = tags
return existing_set, id, locktoken, arn
def list(self, Nextmarker=None):
# there is currently no paginator for wafv2
- req_obj = {
- 'Scope': self.scope,
- 'Limit': 100
- }
+ req_obj = {"Scope": self.scope, "Limit": 100}
if Nextmarker:
- req_obj['NextMarker'] = Nextmarker
+ req_obj["NextMarker"] = Nextmarker
try:
response = self.wafv2.list_ip_sets(**req_obj)
- if response.get('NextMarker'):
- response['IPSets'] += self.list(Nextmarker=response.get('NextMarker')).get('IPSets')
+ if response.get("NextMarker"):
+ response["IPSets"] += self.list(Nextmarker=response.get("NextMarker")).get("IPSets")
except (BotoCoreError, ClientError) as e:
self.fail_json_aws(e, msg="Failed to list wafv2 ip set.")
@@ -248,11 +237,11 @@ class IpSet:
def compare(existing_set, addresses, purge_addresses, state):
diff = False
new_rules = []
- existing_rules = existing_set.get('addresses')
- if state == 'present':
+ existing_rules = existing_set.get("addresses")
+ if state == "present":
if purge_addresses:
new_rules = addresses
- if sorted(addresses) != sorted(existing_set.get('addresses')):
+ if sorted(addresses) != sorted(existing_set.get("addresses")):
diff = True
else:
@@ -274,23 +263,22 @@ def compare(existing_set, addresses, purge_addresses, state):
def main():
-
arg_spec = dict(
- state=dict(type='str', required=True, choices=['present', 'absent']),
- name=dict(type='str', required=True),
- scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']),
- description=dict(type='str'),
- ip_address_version=dict(type='str', choices=['IPV4', 'IPV6']),
- addresses=dict(type='list', elements='str'),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
- purge_addresses=dict(type='bool', default=True),
+ state=dict(type="str", required=True, choices=["present", "absent"]),
+ name=dict(type="str", required=True),
+ scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]),
+ description=dict(type="str"),
+ ip_address_version=dict(type="str", choices=["IPV4", "IPV6"]),
+ addresses=dict(type="list", elements="str"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ purge_addresses=dict(type="bool", default=True),
)
module = AnsibleAWSModule(
argument_spec=arg_spec,
supports_check_mode=True,
- required_if=[['state', 'present', ['ip_address_version', 'addresses']]]
+ required_if=[["state", "present", ["ip_address_version", "addresses"]]],
)
state = module.params.get("state")
@@ -304,17 +292,18 @@ def main():
purge_addresses = module.params.get("purge_addresses")
check_mode = module.check_mode
- wafv2 = module.client('wafv2')
+ wafv2 = module.client("wafv2")
change = False
retval = {}
ip_set = IpSet(wafv2, name, scope, module.fail_json_aws)
- if state == 'present':
-
+ if state == "present":
if ip_set.get():
- tags_updated = ensure_wafv2_tags(wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode)
+ tags_updated = ensure_wafv2_tags(
+ wafv2, ip_set.arn, tags, purge_tags, module.fail_json_aws, module.check_mode
+ )
ips_updated, addresses = compare(ip_set.get(), addresses, purge_addresses, state)
description_updated = bool(description) and ip_set.description() != description
change = ips_updated or description_updated or tags_updated
@@ -322,32 +311,23 @@ def main():
if module.check_mode:
pass
elif ips_updated or description_updated:
- retval = ip_set.update(
- description=description,
- addresses=addresses
- )
+ retval = ip_set.update(description=description, addresses=addresses)
elif tags_updated:
retval, id, locktoken, arn = ip_set.get_set()
else:
if not check_mode:
retval = ip_set.create(
- description=description,
- ip_address_version=ip_address_version,
- addresses=addresses,
- tags=tags
+ description=description, ip_address_version=ip_address_version, addresses=addresses, tags=tags
)
change = True
- if state == 'absent':
+ if state == "absent":
if ip_set.get():
if addresses:
if len(addresses) > 0:
change, addresses = compare(ip_set.get(), addresses, purge_addresses, state)
if change and not check_mode:
- retval = ip_set.update(
- description=description,
- addresses=addresses
- )
+ retval = ip_set.update(description=description, addresses=addresses)
else:
if not check_mode:
retval = ip_set.remove()
@@ -356,5 +336,5 @@ def main():
module.exit_json(changed=change, **retval)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
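
IpSet.list() follows NextMarker by calling itself recursively because, as the comment says, botocore currently ships no paginator for wafv2. The same walk can be written as a loop; the sketch below runs against a stub client so it is self-contained (the stub's two canned pages are invented, while the list_ip_sets call shape of Scope, Limit, and NextMarker matches the real client):

class StubWAFV2:
    # Stand-in for module.client("wafv2"); serves two canned pages.
    def __init__(self):
        self._pages = {
            None: {"IPSets": [{"Name": "set-a"}], "NextMarker": "m1"},
            "m1": {"IPSets": [{"Name": "set-b"}]},
        }

    def list_ip_sets(self, Scope, Limit, NextMarker=None):
        return self._pages[NextMarker]

def list_all_ip_sets(client, scope):
    ip_sets, marker = [], None
    while True:
        kwargs = {"Scope": scope, "Limit": 100}
        if marker:
            kwargs["NextMarker"] = marker
        response = client.list_ip_sets(**kwargs)
        ip_sets += response.get("IPSets", [])
        marker = response.get("NextMarker")
        if not marker:
            return ip_sets

print(list_all_ip_sets(StubWAFV2(), "REGIONAL"))
# [{'Name': 'set-a'}, {'Name': 'set-b'}]
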
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py
index b92c9a816..caca5cd70 100644
--- a/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_ip_set_info.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: wafv2_ip_set_info
version_added: 1.5.0
@@ -28,20 +27,19 @@ options:
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: test ip set
wafv2_ip_set_info:
name: test02
scope: REGIONAL
-'''
+"""
-RETURN = """
+RETURN = r"""
addresses:
description: Current addresses of the ip set
sample:
@@ -72,28 +70,29 @@ name:
"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None):
# there is currently no paginator for wafv2
- req_obj = {
- 'Scope': scope,
- 'Limit': 100
- }
+ req_obj = {"Scope": scope, "Limit": 100}
if Nextmarker:
- req_obj['NextMarker'] = Nextmarker
+ req_obj["NextMarker"] = Nextmarker
try:
response = wafv2.list_ip_sets(**req_obj)
- if response.get('NextMarker'):
- response['IPSets'] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get('NextMarker')).get('IPSets')
+ if response.get("NextMarker"):
+ response["IPSets"] += list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=response.get("NextMarker")).get(
+ "IPSets"
+ )
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to list wafv2 ip set")
return response
@@ -101,21 +100,15 @@ def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None):
def get_ip_set(wafv2, name, scope, id, fail_json_aws):
try:
- response = wafv2.get_ip_set(
- Name=name,
- Scope=scope,
- Id=id
- )
+ response = wafv2.get_ip_set(Name=name, Scope=scope, Id=id)
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to get wafv2 ip set")
return response
def main():
-
arg_spec = dict(
- name=dict(type='str', required=True),
- scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+ name=dict(type="str", required=True), scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"])
)
module = AnsibleAWSModule(
@@ -126,26 +119,26 @@ def main():
name = module.params.get("name")
scope = module.params.get("scope")
- wafv2 = module.client('wafv2')
+ wafv2 = module.client("wafv2")
# check if ip set exist
response = list_ip_sets(wafv2, scope, module.fail_json_aws)
id = None
- for item in response.get('IPSets'):
- if item.get('Name') == name:
- id = item.get('Id')
- arn = item.get('ARN')
+ for item in response.get("IPSets"):
+ if item.get("Name") == name:
+ id = item.get("Id")
+ arn = item.get("ARN")
retval = {}
existing_set = None
if id:
existing_set = get_ip_set(wafv2, name, scope, id, module.fail_json_aws)
- retval = camel_dict_to_snake_dict(existing_set.get('IPSet'))
- retval['tags'] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {}
+ retval = camel_dict_to_snake_dict(existing_set.get("IPSet"))
+ retval["tags"] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {}
module.exit_json(**retval)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
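
These modules now take camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations rather than the amazon.aws ec2 utils; the behaviour is unchanged. A quick illustration of the conversion, assuming ansible-core is installed; ignore_list is the same argument the modules use to leave user tags exactly as stored:

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

ip_set = {"IPAddressVersion": "IPV4", "Addresses": ["127.0.0.1/32"], "tags": {"CostCenter": "Team-A"}}
print(camel_dict_to_snake_dict(ip_set, ignore_list=["tags"]))
# expected: {'ip_address_version': 'IPV4', 'addresses': ['127.0.0.1/32'], 'tags': {'CostCenter': 'Team-A'}}
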
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_resources.py b/ansible_collections/community/aws/plugins/modules/wafv2_resources.py
index 527ee1087..b36f51712 100644
--- a/ansible_collections/community/aws/plugins/modules/wafv2_resources.py
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_resources.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: wafv2_resources
version_added: 1.5.0
@@ -37,22 +36,21 @@ options:
required: true
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: add test alb to waf string03
community.aws.wafv2_resources:
name: string03
scope: REGIONAL
state: present
arn: "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933"
-'''
+"""
-RETURN = """
+RETURN = r"""
resource_arns:
description: Current resources the wafv2 web ACL is applied to
sample:
@@ -62,22 +60,20 @@ resource_arns:
"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls
def get_web_acl(wafv2, name, scope, id, fail_json_aws):
try:
- response = wafv2.get_web_acl(
- Name=name,
- Scope=scope,
- Id=id
- )
+ response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id)
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to get wafv2 web acl.")
return response
@@ -85,9 +81,7 @@ def get_web_acl(wafv2, name, scope, id, fail_json_aws):
def list_wafv2_resources(wafv2, arn, fail_json_aws):
try:
- response = wafv2.list_resources_for_web_acl(
- WebACLArn=arn
- )
+ response = wafv2.list_resources_for_web_acl(WebACLArn=arn)
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to list wafv2 web acl.")
return response
@@ -95,10 +89,7 @@ def list_wafv2_resources(wafv2, arn, fail_json_aws):
def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws):
try:
- response = wafv2.associate_web_acl(
- WebACLArn=waf_arn,
- ResourceArn=arn
- )
+ response = wafv2.associate_web_acl(WebACLArn=waf_arn, ResourceArn=arn)
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to add wafv2 web acl.")
return response
@@ -106,27 +97,24 @@ def add_wafv2_resources(wafv2, waf_arn, arn, fail_json_aws):
def remove_resources(wafv2, arn, fail_json_aws):
try:
- response = wafv2.disassociate_web_acl(
- ResourceArn=arn
- )
+ response = wafv2.disassociate_web_acl(ResourceArn=arn)
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to remove wafv2 web acl.")
return response
def main():
-
arg_spec = dict(
- state=dict(type='str', required=True, choices=['present', 'absent']),
- name=dict(type='str'),
- scope=dict(type='str', choices=['CLOUDFRONT', 'REGIONAL']),
- arn=dict(type='str', required=True)
+ state=dict(type="str", required=True, choices=["present", "absent"]),
+ name=dict(type="str"),
+ scope=dict(type="str", choices=["CLOUDFRONT", "REGIONAL"]),
+ arn=dict(type="str", required=True),
)
module = AnsibleAWSModule(
argument_spec=arg_spec,
supports_check_mode=True,
- required_if=[['state', 'present', ['name', 'scope']]]
+ required_if=[["state", "present", ["name", "scope"]]],
)
state = module.params.get("state")
@@ -135,7 +123,7 @@ def main():
arn = module.params.get("arn")
check_mode = module.check_mode
- wafv2 = module.client('wafv2')
+ wafv2 = module.client("wafv2")
# check if web acl exists
@@ -145,26 +133,26 @@ def main():
retval = {}
change = False
- for item in response.get('WebACLs'):
- if item.get('Name') == name:
- id = item.get('Id')
+ for item in response.get("WebACLs"):
+ if item.get("Name") == name:
+ id = item.get("Id")
if id:
existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws)
- waf_arn = existing_acl.get('WebACL').get('ARN')
+ waf_arn = existing_acl.get("WebACL").get("ARN")
retval = list_wafv2_resources(wafv2, waf_arn, module.fail_json_aws)
- if state == 'present':
+ if state == "present":
if retval:
- if arn not in retval.get('ResourceArns'):
+ if arn not in retval.get("ResourceArns"):
change = True
if not check_mode:
retval = add_wafv2_resources(wafv2, waf_arn, arn, module.fail_json_aws)
- elif state == 'absent':
+ elif state == "absent":
if retval:
- if arn in retval.get('ResourceArns'):
+ if arn in retval.get("ResourceArns"):
change = True
if not check_mode:
retval = remove_resources(wafv2, arn, module.fail_json_aws)
@@ -172,5 +160,5 @@ def main():
module.exit_json(changed=change, **camel_dict_to_snake_dict(retval))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
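
Stripped of error handling, the wafv2_resources state logic is a two-row decision table: associate only when the target ARN is missing from the web ACL's resource list, disassociate only when it is present, and report no change otherwise. A compact sketch of that decision in pure Python with no AWS calls (the sample ARN is taken from the EXAMPLES block above):

def plan_association(state, resource_arn, current_arns):
    # Returns (changed, api_call) mirroring the module's present/absent handling.
    if state == "present" and resource_arn not in current_arns:
        return True, "associate_web_acl"
    if state == "absent" and resource_arn in current_arns:
        return True, "disassociate_web_acl"
    return False, None  # already in the desired state

arn = "arn:aws:elasticloadbalancing:eu-central-1:111111111:loadbalancer/app/test03/dd83ea041ba6f933"
print(plan_association("present", arn, [arn]))  # (False, None)
print(plan_association("absent", arn, [arn]))   # (True, 'disassociate_web_acl')
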
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py
index 3a2a7b5dd..5cafee1f6 100644
--- a/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_resources_info.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: wafv2_resources_info
version_added: 1.5.0
@@ -28,20 +27,19 @@ options:
type: str
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: get web acl
community.aws.wafv2_resources_info:
name: string03
scope: REGIONAL
-'''
+"""
-RETURN = """
+RETURN = r"""
resource_arns:
description: Current resources the wafv2 web ACL is applied to
sample:
@@ -51,22 +49,20 @@ resource_arns:
"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls
def get_web_acl(wafv2, name, scope, id, fail_json_aws):
try:
- response = wafv2.get_web_acl(
- Name=name,
- Scope=scope,
- Id=id
- )
+ response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id)
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to get wafv2 web acl.")
return response
@@ -78,19 +74,16 @@ def list_web_acls(wafv2, scope, fail_json_aws):
def list_wafv2_resources(wafv2, arn, fail_json_aws):
try:
- response = wafv2.list_resources_for_web_acl(
- WebACLArn=arn
- )
+ response = wafv2.list_resources_for_web_acl(WebACLArn=arn)
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to list wafv2 resources.")
return response
def main():
-
arg_spec = dict(
- name=dict(type='str', required=True),
- scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+ name=dict(type="str", required=True),
+ scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]),
)
module = AnsibleAWSModule(
@@ -101,25 +94,25 @@ def main():
name = module.params.get("name")
scope = module.params.get("scope")
- wafv2 = module.client('wafv2')
+ wafv2 = module.client("wafv2")
# check if web acl exists
response = list_web_acls(wafv2, scope, module.fail_json_aws)
id = None
retval = {}
- for item in response.get('WebACLs'):
- if item.get('Name') == name:
- id = item.get('Id')
+ for item in response.get("WebACLs"):
+ if item.get("Name") == name:
+ id = item.get("Id")
if id:
existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws)
- arn = existing_acl.get('WebACL').get('ARN')
+ arn = existing_acl.get("WebACL").get("ARN")
retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn, module.fail_json_aws))
module.exit_json(**retval)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py
index 8e46853c8..e2a7fd1d4 100644
--- a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: wafv2_rule_group
version_added: 1.5.0
@@ -67,14 +66,13 @@ options:
type: bool
extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.tags
-- amazon.aws.boto3
-
-'''
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: change description
community.aws.wafv2_rule_group:
name: test02
@@ -150,9 +148,9 @@ EXAMPLES = '''
A: B
C: D
register: out
-'''
+"""
-RETURN = """
+RETURN = r"""
arn:
description: Rule group arn
sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7
@@ -200,19 +198,22 @@ visibility_config:
"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules
-from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups
-from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict
from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_snake_dict_to_camel_dict
class RuleGroup:
@@ -226,20 +227,20 @@ class RuleGroup:
def update(self, description, rules, sampled_requests, cloudwatch_metrics, metric_name):
req_obj = {
- 'Name': self.name,
- 'Scope': self.scope,
- 'Id': self.id,
- 'Rules': rules,
- 'LockToken': self.locktoken,
- 'VisibilityConfig': {
- 'SampledRequestsEnabled': sampled_requests,
- 'CloudWatchMetricsEnabled': cloudwatch_metrics,
- 'MetricName': metric_name
- }
+ "Name": self.name,
+ "Scope": self.scope,
+ "Id": self.id,
+ "Rules": rules,
+ "LockToken": self.locktoken,
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": sampled_requests,
+ "CloudWatchMetricsEnabled": cloudwatch_metrics,
+ "MetricName": metric_name,
+ },
}
if description:
- req_obj['Description'] = description
+ req_obj["Description"] = description
try:
response = self.wafv2.update_rule_group(**req_obj)
@@ -251,11 +252,11 @@ class RuleGroup:
if self.id is None:
response = self.list()
- for item in response.get('RuleGroups'):
- if item.get('Name') == self.name:
- self.id = item.get('Id')
- self.locktoken = item.get('LockToken')
- self.arn = item.get('ARN')
+ for item in response.get("RuleGroups"):
+ if item.get("Name") == self.name:
+ self.id = item.get("Id")
+ self.locktoken = item.get("LockToken")
+ self.arn = item.get("ARN")
return self.refresh_group()
@@ -263,18 +264,14 @@ class RuleGroup:
existing_group = None
if self.id:
try:
- response = self.wafv2.get_rule_group(
- Name=self.name,
- Scope=self.scope,
- Id=self.id
- )
- existing_group = response.get('RuleGroup')
- self.locktoken = response.get('LockToken')
+ response = self.wafv2.get_rule_group(Name=self.name, Scope=self.scope, Id=self.id)
+ existing_group = response.get("RuleGroup")
+ self.locktoken = response.get("LockToken")
except (BotoCoreError, ClientError) as e:
self.fail_json_aws(e, msg="Failed to get wafv2 rule group.")
tags = describe_wafv2_tags(self.wafv2, self.arn, self.fail_json_aws)
- existing_group['tags'] = tags or {}
+ existing_group["tags"] = tags or {}
return existing_group
@@ -289,10 +286,7 @@ class RuleGroup:
def remove(self):
try:
response = self.wafv2.delete_rule_group(
- Name=self.name,
- Scope=self.scope,
- Id=self.id,
- LockToken=self.locktoken
+ Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken
)
except (BotoCoreError, ClientError) as e:
self.fail_json_aws(e, msg="Failed to delete wafv2 rule group.")
@@ -300,22 +294,22 @@ class RuleGroup:
def create(self, capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags):
req_obj = {
- 'Name': self.name,
- 'Scope': self.scope,
- 'Capacity': capacity,
- 'Rules': rules,
- 'VisibilityConfig': {
- 'SampledRequestsEnabled': sampled_requests,
- 'CloudWatchMetricsEnabled': cloudwatch_metrics,
- 'MetricName': metric_name
- }
+ "Name": self.name,
+ "Scope": self.scope,
+ "Capacity": capacity,
+ "Rules": rules,
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": sampled_requests,
+ "CloudWatchMetricsEnabled": cloudwatch_metrics,
+ "MetricName": metric_name,
+ },
}
if description:
- req_obj['Description'] = description
+ req_obj["Description"] = description
if tags:
- req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+ req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags)
try:
response = self.wafv2.create_rule_group(**req_obj)
@@ -328,26 +322,25 @@ class RuleGroup:
def main():
-
arg_spec = dict(
- state=dict(type='str', required=True, choices=['present', 'absent']),
- name=dict(type='str', required=True),
- scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']),
- capacity=dict(type='int'),
- description=dict(type='str'),
- rules=dict(type='list', elements='dict'),
- sampled_requests=dict(type='bool', default=False),
- cloudwatch_metrics=dict(type='bool', default=True),
- metric_name=dict(type='str'),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(default=True, type='bool'),
- purge_rules=dict(default=True, type='bool'),
+ state=dict(type="str", required=True, choices=["present", "absent"]),
+ name=dict(type="str", required=True),
+ scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]),
+ capacity=dict(type="int"),
+ description=dict(type="str"),
+ rules=dict(type="list", elements="dict"),
+ sampled_requests=dict(type="bool", default=False),
+ cloudwatch_metrics=dict(type="bool", default=True),
+ metric_name=dict(type="str"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(default=True, type="bool"),
+ purge_rules=dict(default=True, type="bool"),
)
module = AnsibleAWSModule(
argument_spec=arg_spec,
supports_check_mode=True,
- required_if=[['state', 'present', ['capacity', 'rules']]]
+ required_if=[["state", "present", ["capacity", "rules"]]],
)
state = module.params.get("state")
@@ -372,31 +365,26 @@ def main():
if not metric_name:
metric_name = name
- wafv2 = module.client('wafv2')
+ wafv2 = module.client("wafv2")
rule_group = RuleGroup(wafv2, name, scope, module.fail_json_aws)
change = False
retval = {}
- if state == 'present':
+ if state == "present":
if rule_group.get():
- tagging_change = ensure_wafv2_tags(wafv2, rule_group.arn, tags, purge_tags,
- module.fail_json_aws, module.check_mode)
- rules_change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state)
- description_change = bool(description) and (rule_group.get().get('Description') != description)
+ tagging_change = ensure_wafv2_tags(
+ wafv2, rule_group.arn, tags, purge_tags, module.fail_json_aws, module.check_mode
+ )
+ rules_change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state)
+ description_change = bool(description) and (rule_group.get().get("Description") != description)
change = tagging_change or rules_change or description_change
retval = rule_group.get()
if module.check_mode:
# In check mode nothing changes...
pass
elif rules_change or description_change:
- retval = rule_group.update(
- description,
- rules,
- sampled_requests,
- cloudwatch_metrics,
- metric_name
- )
+ retval = rule_group.update(description, rules, sampled_requests, cloudwatch_metrics, metric_name)
elif tagging_change:
retval = rule_group.refresh_group()
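A simplified sketch of the compare_priority_rules contract used above: WAFv2 rules are keyed by Priority, and the helper returns a changed flag plus the rule list to send to AWS. The body below is an assumption about its internals for the state=present path only; the real helper in community.aws's module_utils.wafv2 also handles state=absent.

```python
# Assumed, simplified merge: desired rules win on Priority collisions,
# and purge_rules drops existing rules that are not re-declared.
def compare_by_priority(existing, desired, purge_rules):
    merged = {} if purge_rules else {r["Priority"]: r for r in (existing or [])}
    for rule in desired or []:
        merged[rule["Priority"]] = rule
    new_rules = [merged[p] for p in sorted(merged)]
    return new_rules != (existing or []), new_rules
```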
@@ -404,35 +392,25 @@ def main():
change = True
if not check_mode:
retval = rule_group.create(
- capacity,
- description,
- rules,
- sampled_requests,
- cloudwatch_metrics,
- metric_name,
- tags
+ capacity, description, rules, sampled_requests, cloudwatch_metrics, metric_name, tags
)
- elif state == 'absent':
+ elif state == "absent":
if rule_group.get():
if rules:
if len(rules) > 0:
- change, rules = compare_priority_rules(rule_group.get().get('Rules'), rules, purge_rules, state)
+ change, rules = compare_priority_rules(rule_group.get().get("Rules"), rules, purge_rules, state)
if change and not check_mode:
retval = rule_group.update(
- description,
- rules,
- sampled_requests,
- cloudwatch_metrics,
- metric_name
+ description, rules, sampled_requests, cloudwatch_metrics, metric_name
)
else:
change = True
if not check_mode:
retval = rule_group.remove()
- module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=['tags']))
+ module.exit_json(changed=change, **camel_dict_to_snake_dict(retval, ignore_list=["tags"]))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
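The ignore_list=["tags"] in the exit_json call above keeps user-defined tag keys untouched while the rest of the AWS response is snake_cased. A small self-contained illustration (sample data, not a real API response):

```python
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

resp = {"Capacity": 500, "VisibilityConfig": {"MetricName": "blub"}, "tags": {"CostCenter": "42"}}
print(camel_dict_to_snake_dict(resp, ignore_list=["tags"]))
# {'capacity': 500, 'visibility_config': {'metric_name': 'blub'}, 'tags': {'CostCenter': '42'}}
```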
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py
index a42bea0c2..58862a9a5 100644
--- a/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_rule_group_info.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: wafv2_rule_group_info
version_added: 1.5.0
@@ -15,11 +14,6 @@ short_description: wafv2_web_acl_info
description:
- Get information about existing wafv2 rule groups.
options:
- state:
- description:
- - This option does nothing, has been deprecated, and will be removed in a release after 2022-12-01.
- required: false
- type: str
name:
description:
- The name of the rule group.
@@ -33,21 +27,19 @@ options:
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: rule group info
community.aws.wafv2_rule_group_info:
name: test02
- state: present
scope: REGIONAL
-'''
+"""
-RETURN = """
+RETURN = r"""
arn:
description: Rule group arn
sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7
@@ -95,23 +87,21 @@ visibility_config:
"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
+from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups
def get_rule_group(wafv2, name, scope, id, fail_json_aws):
try:
- response = wafv2.get_rule_group(
- Name=name,
- Scope=scope,
- Id=id
- )
+ response = wafv2.get_rule_group(Name=name, Scope=scope, Id=id)
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to get wafv2 rule group.")
return response
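The error-handling idiom above, in isolation: every boto3 call is wrapped so that client-side (BotoCoreError) and service-side (ClientError) failures both surface through the module's fail_json_aws. A generic sketch:

```python
try:
    from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
    BotoCoreError = ClientError = Exception  # sketch only; the modules defer to AnsibleAWSModule

def safe_call(call, fail_json_aws, msg, **kwargs):
    # Forward the AWS error plus a human-readable message to the module.
    try:
        return call(**kwargs)
    except (BotoCoreError, ClientError) as e:
        fail_json_aws(e, msg=msg)
```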
@@ -119,46 +109,39 @@ def get_rule_group(wafv2, name, scope, id, fail_json_aws):
def main():
arg_spec = dict(
- state=dict(type='str', required=False),
- name=dict(type='str', required=True),
- scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+ name=dict(type="str", required=True),
+ scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]),
)
module = AnsibleAWSModule(
argument_spec=arg_spec,
- supports_check_mode=True
+ supports_check_mode=True,
)
- state = module.params.get("state")
name = module.params.get("name")
scope = module.params.get("scope")
- wafv2 = module.client('wafv2')
-
- if state:
- module.deprecate(
- 'The state parameter does nothing, has been deprecated, and will be removed in a future release.',
- version='6.0.0', collection_name='community.aws')
+ wafv2 = module.client("wafv2")
# check if rule group exists
response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws)
id = None
retval = {}
- for item in response.get('RuleGroups'):
- if item.get('Name') == name:
- id = item.get('Id')
- arn = item.get('ARN')
+ for item in response.get("RuleGroups"):
+ if item.get("Name") == name:
+ id = item.get("Id")
+ arn = item.get("ARN")
existing_group = None
if id:
existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws)
- retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup'))
+ retval = camel_dict_to_snake_dict(existing_group.get("RuleGroup"))
tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws)
- retval['tags'] = tags or {}
+ retval["tags"] = tags or {}
module.exit_json(**retval)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
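A hedged sketch of what a wafv2 list helper has to do under the hood: the wafv2 list APIs page with NextMarker and ship no boto3 paginator, so callers loop until the marker disappears. The function below is an assumption for illustration, not the actual wafv2_list_rule_groups implementation.

```python
import boto3

def list_rule_groups(client, scope):
    groups, marker = [], None
    while True:
        kwargs = {"Scope": scope}
        if marker:
            kwargs["NextMarker"] = marker
        resp = client.list_rule_groups(**kwargs)
        groups.extend(resp.get("RuleGroups", []))
        marker = resp.get("NextMarker")
        if not marker:
            return groups

client = boto3.client("wafv2", region_name="eu-central-1")
# groups = list_rule_groups(client, "REGIONAL")
```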
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py
index f91fe64e6..054c093c5 100644
--- a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: wafv2_web_acl
version_added: 1.5.0
@@ -89,7 +88,6 @@ options:
- A map of custom response keys and content bodies.
- Define response bodies here and reference them in the rules by providing the key of the body dictionary element.
- Each element must have a unique dict key, and the dict must contain the two keys I(content_type) and I(content).
- - Requires botocore >= 1.20.40
type: dict
version_added: 3.1.0
purge_rules:
@@ -102,14 +100,13 @@ notes:
- Support for the I(purge_tags) parameter was added in release 4.0.0.
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
- amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create test web acl
community.aws.wafv2_web_acl:
name: test05
@@ -249,10 +246,9 @@ EXAMPLES = '''
content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }'
region: us-east-1
state: present
+"""
-'''
-
-RETURN = """
+RETURN = r"""
arn:
description: web acl arn
sample: arn:aws:wafv2:eu-central-1:123456789012:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61
@@ -315,14 +311,17 @@ visibility_config:
"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.wafv2 import compare_priority_rules
from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
from ansible_collections.community.aws.plugins.module_utils.wafv2 import ensure_wafv2_tags
@@ -338,26 +337,35 @@ class WebACL:
self.fail_json_aws = fail_json_aws
self.existing_acl, self.id, self.locktoken = self.get_web_acl()
- def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name, custom_response_bodies):
+ def update(
+ self,
+ default_action,
+ description,
+ rules,
+ sampled_requests,
+ cloudwatch_metrics,
+ metric_name,
+ custom_response_bodies,
+ ):
req_obj = {
- 'Name': self.name,
- 'Scope': self.scope,
- 'Id': self.id,
- 'DefaultAction': default_action,
- 'Rules': rules,
- 'VisibilityConfig': {
- 'SampledRequestsEnabled': sampled_requests,
- 'CloudWatchMetricsEnabled': cloudwatch_metrics,
- 'MetricName': metric_name
+ "Name": self.name,
+ "Scope": self.scope,
+ "Id": self.id,
+ "DefaultAction": default_action,
+ "Rules": rules,
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": sampled_requests,
+ "CloudWatchMetricsEnabled": cloudwatch_metrics,
+ "MetricName": metric_name,
},
- 'LockToken': self.locktoken
+ "LockToken": self.locktoken,
}
if description:
- req_obj['Description'] = description
+ req_obj["Description"] = description
if custom_response_bodies:
- req_obj['CustomResponseBodies'] = custom_response_bodies
+ req_obj["CustomResponseBodies"] = custom_response_bodies
try:
response = self.wafv2.update_web_acl(**req_obj)
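WAFv2 guards mutations with an optimistic lock: the LockToken returned by get_web_acl must accompany update_web_acl and delete_web_acl, and a stale token raises WAFOptimisticLockException. A minimal retry sketch (the module itself fetches the token once at construction time instead of retrying):

```python
def update_with_fresh_token(wafv2, name, scope, acl_id, changes):
    # Re-read the LockToken and retry once if another writer won the race.
    for attempt in (1, 2):
        got = wafv2.get_web_acl(Name=name, Scope=scope, Id=acl_id)
        try:
            return wafv2.update_web_acl(
                Name=name, Scope=scope, Id=acl_id, LockToken=got["LockToken"], **changes
            )
        except wafv2.exceptions.WAFOptimisticLockException:
            if attempt == 2:
                raise
```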
@@ -369,12 +377,7 @@ class WebACL:
def remove(self):
try:
- response = self.wafv2.delete_web_acl(
- Name=self.name,
- Scope=self.scope,
- Id=self.id,
- LockToken=self.locktoken
- )
+ response = self.wafv2.delete_web_acl(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken)
except (BotoCoreError, ClientError) as e:
self.fail_json_aws(e, msg="Failed to remove wafv2 web acl.")
return response
@@ -391,47 +394,53 @@ class WebACL:
existing_acl = None
response = self.list()
- for item in response.get('WebACLs'):
- if item.get('Name') == self.name:
- id = item.get('Id')
- locktoken = item.get('LockToken')
- arn = item.get('ARN')
+ for item in response.get("WebACLs"):
+ if item.get("Name") == self.name:
+ id = item.get("Id")
+ locktoken = item.get("LockToken")
+ arn = item.get("ARN")
if id:
try:
- existing_acl = self.wafv2.get_web_acl(
- Name=self.name,
- Scope=self.scope,
- Id=id
- )
+ existing_acl = self.wafv2.get_web_acl(Name=self.name, Scope=self.scope, Id=id)
except (BotoCoreError, ClientError) as e:
self.fail_json_aws(e, msg="Failed to get wafv2 web acl.")
tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws)
- existing_acl['tags'] = tags
+ existing_acl["tags"] = tags
return existing_acl, id, locktoken
def list(self):
return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws)
- def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description, custom_response_bodies):
+ def create(
+ self,
+ default_action,
+ rules,
+ sampled_requests,
+ cloudwatch_metrics,
+ metric_name,
+ tags,
+ description,
+ custom_response_bodies,
+ ):
req_obj = {
- 'Name': self.name,
- 'Scope': self.scope,
- 'DefaultAction': default_action,
- 'Rules': rules,
- 'VisibilityConfig': {
- 'SampledRequestsEnabled': sampled_requests,
- 'CloudWatchMetricsEnabled': cloudwatch_metrics,
- 'MetricName': metric_name
- }
+ "Name": self.name,
+ "Scope": self.scope,
+ "DefaultAction": default_action,
+ "Rules": rules,
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": sampled_requests,
+ "CloudWatchMetricsEnabled": cloudwatch_metrics,
+ "MetricName": metric_name,
+ },
}
if custom_response_bodies:
- req_obj['CustomResponseBodies'] = custom_response_bodies
+ req_obj["CustomResponseBodies"] = custom_response_bodies
if description:
- req_obj['Description'] = description
+ req_obj["Description"] = description
if tags:
- req_obj['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+ req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags)
try:
response = self.wafv2.create_web_acl(**req_obj)
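The req_obj assembled above, written as a direct boto3 call for comparison; note that VisibilityConfig is mandatory for WAFv2 web ACLs even when sampling and metrics are switched off. The name and region are illustrative.

```python
import boto3

client = boto3.client("wafv2", region_name="us-east-1")
client.create_web_acl(
    Name="test05",
    Scope="REGIONAL",
    DefaultAction={"Allow": {}},
    Rules=[],
    VisibilityConfig={
        "SampledRequestsEnabled": False,
        "CloudWatchMetricsEnabled": True,
        "MetricName": "test05",
    },
)
```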
@@ -443,7 +452,6 @@ class WebACL:
def format_result(result):
-
# We were returning details of the Web ACL inside a "web_acl" parameter on
# creation, keep returning it to avoid breaking existing playbooks, but also
# return what the docs said we return (and returned when no change happened)
@@ -451,31 +459,30 @@ def format_result(result):
if "WebACL" in retval:
retval.update(retval["WebACL"])
- return camel_dict_to_snake_dict(retval, ignore_list=['tags'])
+ return camel_dict_to_snake_dict(retval, ignore_list=["tags"])
def main():
-
arg_spec = dict(
- state=dict(type='str', required=True, choices=['present', 'absent']),
- name=dict(type='str', required=True),
- scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']),
- description=dict(type='str'),
- default_action=dict(type='str', choices=['Block', 'Allow']),
- rules=dict(type='list', elements='dict'),
- sampled_requests=dict(type='bool', default=False),
- cloudwatch_metrics=dict(type='bool', default=True),
- metric_name=dict(type='str'),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(default=True, type='bool'),
- custom_response_bodies=dict(type='dict'),
- purge_rules=dict(default=True, type='bool'),
+ state=dict(type="str", required=True, choices=["present", "absent"]),
+ name=dict(type="str", required=True),
+ scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]),
+ description=dict(type="str"),
+ default_action=dict(type="str", choices=["Block", "Allow"]),
+ rules=dict(type="list", elements="dict"),
+ sampled_requests=dict(type="bool", default=False),
+ cloudwatch_metrics=dict(type="bool", default=True),
+ metric_name=dict(type="str"),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(default=True, type="bool"),
+ custom_response_bodies=dict(type="dict"),
+ purge_rules=dict(default=True, type="bool"),
)
module = AnsibleAWSModule(
argument_spec=arg_spec,
supports_check_mode=True,
- required_if=[['state', 'present', ['default_action', 'rules']]]
+ required_if=[["state", "present", ["default_action", "rules"]]],
)
state = module.params.get("state")
@@ -494,16 +501,15 @@ def main():
custom_response_bodies = module.params.get("custom_response_bodies")
if custom_response_bodies:
- module.require_botocore_at_least('1.20.40', reason='to set custom response bodies')
custom_response_bodies = {}
for custom_name, body in module.params.get("custom_response_bodies").items():
custom_response_bodies[custom_name] = snake_dict_to_camel_dict(body, capitalize_first=True)
- if default_action == 'Block':
- default_action = {'Block': {}}
- elif default_action == 'Allow':
- default_action = {'Allow': {}}
+ if default_action == "Block":
+ default_action = {"Block": {}}
+ elif default_action == "Allow":
+ default_action = {"Allow": {}}
if rules:
rules = []
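What the two conversions above produce for typical playbook input: the default_action string becomes the nested dict the API expects, and each custom response body is PascalCased via capitalize_first=True.

```python
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

default_action = {"Block": {}}  # from the playbook string "Block"
body = {"content_type": "APPLICATION_JSON", "content": '{"msg": "blocked"}'}
print(snake_dict_to_camel_dict(body, capitalize_first=True))
# {'ContentType': 'APPLICATION_JSON', 'Content': '{"msg": "blocked"}'}
```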
@@ -513,17 +519,19 @@ def main():
if not metric_name:
metric_name = name
- wafv2 = module.client('wafv2')
+ wafv2 = module.client("wafv2")
web_acl = WebACL(wafv2, name, scope, module.fail_json_aws)
change = False
retval = {}
- if state == 'present':
+ if state == "present":
if web_acl.get():
- tags_changed = ensure_wafv2_tags(wafv2, web_acl.get().get('WebACL').get('ARN'), tags, purge_tags, module.fail_json_aws, module.check_mode)
- change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state)
- change = change or (description and web_acl.get().get('WebACL').get('Description') != description)
- change = change or (default_action and web_acl.get().get('WebACL').get('DefaultAction') != default_action)
+ tags_changed = ensure_wafv2_tags(
+ wafv2, web_acl.get().get("WebACL").get("ARN"), tags, purge_tags, module.fail_json_aws, module.check_mode
+ )
+ change, rules = compare_priority_rules(web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state)
+ change = change or (description and web_acl.get().get("WebACL").get("Description") != description)
+ change = change or (default_action and web_acl.get().get("WebACL").get("DefaultAction") != default_action)
if change and not check_mode:
retval = web_acl.update(
@@ -533,7 +541,7 @@ def main():
sampled_requests,
cloudwatch_metrics,
metric_name,
- custom_response_bodies
+ custom_response_bodies,
)
elif tags_changed:
retval, id, locktoken = web_acl.get_web_acl()
@@ -553,14 +561,16 @@ def main():
metric_name,
tags,
description,
- custom_response_bodies
+ custom_response_bodies,
)
- elif state == 'absent':
+ elif state == "absent":
if web_acl.get():
if rules:
if len(rules) > 0:
- change, rules = compare_priority_rules(web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state)
+ change, rules = compare_priority_rules(
+ web_acl.get().get("WebACL").get("Rules"), rules, purge_rules, state
+ )
if change and not check_mode:
retval = web_acl.update(
default_action,
@@ -569,7 +579,7 @@ def main():
sampled_requests,
cloudwatch_metrics,
metric_name,
- custom_response_bodies
+ custom_response_bodies,
)
else:
change = True
@@ -579,5 +589,5 @@ def main():
module.exit_json(changed=change, **format_result(retval))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
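The compatibility shim in format_result, isolated: create_web_acl returns {"WebACL": {...}} while the no-change paths return the ACL dict directly, so nested keys are merged to the top level before snake_casing.

```python
def flatten_web_acl(result):
    retval = dict(result)
    if "WebACL" in retval:
        retval.update(retval["WebACL"])
    return retval

print(flatten_web_acl({"WebACL": {"Id": "abc", "Name": "test05"}}))
# {'WebACL': {'Id': 'abc', 'Name': 'test05'}, 'Id': 'abc', 'Name': 'test05'}
```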
diff --git a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py
index 13be05db5..e3cdc46e3 100644
--- a/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py
+++ b/ansible_collections/community/aws/plugins/modules/wafv2_web_acl_info.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: wafv2_web_acl_info
version_added: 1.5.0
@@ -28,21 +27,20 @@ options:
type: str
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
+"""
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: get web acl
community.aws.wafv2_web_acl_info:
name: test05
scope: REGIONAL
register: out
-'''
+"""
-RETURN = """
+RETURN = r"""
arn:
description: web acl arn
sample: arn:aws:wafv2:eu-central-1:11111111:regional/webacl/test05/318c1ab9-fa74-4b3b-a974-f92e25106f61
@@ -91,33 +89,30 @@ visibility_config:
"""
try:
- from botocore.exceptions import ClientError, BotoCoreError
+ from botocore.exceptions import BotoCoreError
+ from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls
def get_web_acl(wafv2, name, scope, id, fail_json_aws):
try:
- response = wafv2.get_web_acl(
- Name=name,
- Scope=scope,
- Id=id
- )
+ response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id)
except (BotoCoreError, ClientError) as e:
fail_json_aws(e, msg="Failed to get wafv2 web acl.")
return response
def main():
-
arg_spec = dict(
- name=dict(type='str', required=True),
- scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
+ name=dict(type="str", required=True),
+ scope=dict(type="str", required=True, choices=["CLOUDFRONT", "REGIONAL"]),
)
module = AnsibleAWSModule(
@@ -129,7 +124,7 @@ def main():
name = module.params.get("name")
scope = module.params.get("scope")
- wafv2 = module.client('wafv2')
+ wafv2 = module.client("wafv2")
# check if web acl exists
response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws)
@@ -137,19 +132,19 @@ def main():
arn = None
retval = {}
- for item in response.get('WebACLs'):
- if item.get('Name') == name:
- id = item.get('Id')
- arn = item.get('ARN')
+ for item in response.get("WebACLs"):
+ if item.get("Name") == name:
+ id = item.get("Id")
+ arn = item.get("ARN")
if id:
existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws)
- retval = camel_dict_to_snake_dict(existing_acl.get('WebACL'))
+ retval = camel_dict_to_snake_dict(existing_acl.get("WebACL"))
tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws)
- retval['tags'] = tags
+ retval["tags"] = tags
module.exit_json(**retval)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
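Both *_info modules share the same lookup flow: list by scope, match on Name to recover Id and ARN, then describe the resource and attach its tags. A condensed boto3-only sketch, where a direct list_tags_for_resource call stands in for the collection's describe_wafv2_tags helper:

```python
import boto3

def lookup_web_acl(client, name, scope):
    for item in client.list_web_acls(Scope=scope)["WebACLs"]:
        if item["Name"] == name:
            acl = client.get_web_acl(Name=name, Scope=scope, Id=item["Id"])["WebACL"]
            tag_info = client.list_tags_for_resource(ResourceARN=item["ARN"])
            acl["tags"] = {t["Key"]: t["Value"] for t in tag_info["TagInfoForResource"].get("TagList", [])}
            return acl
    return None

client = boto3.client("wafv2", region_name="eu-central-1")
# acl = lookup_web_acl(client, "test05", "REGIONAL")
```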