From 975f66f2eebe9dadba04f275774d4ab83f74cf25 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 13 Apr 2024 14:04:41 +0200 Subject: Adding upstream version 7.7.0+dfsg. Signed-off-by: Daniel Baumann --- .../plugins/modules/gcp_appengine_firewall_rule.py | 325 + .../modules/gcp_appengine_firewall_rule_info.py | 186 + .../cloud/plugins/modules/gcp_bigquery_dataset.py | 757 ++ .../plugins/modules/gcp_bigquery_dataset_info.py | 347 + .../cloud/plugins/modules/gcp_bigquery_table.py | 1713 +++++ .../plugins/modules/gcp_bigquery_table_info.py | 626 ++ .../cloud/plugins/modules/gcp_bigtable_instance.py | 535 ++ .../plugins/modules/gcp_bigtable_instance_info.py | 224 + .../plugins/modules/gcp_cloudbuild_trigger.py | 2246 ++++++ .../plugins/modules/gcp_cloudbuild_trigger_info.py | 850 +++ .../modules/gcp_cloudfunctions_cloud_function.py | 741 ++ .../gcp_cloudfunctions_cloud_function_info.py | 309 + .../plugins/modules/gcp_cloudscheduler_job.py | 999 +++ .../plugins/modules/gcp_cloudscheduler_job_info.py | 415 ++ .../cloud/plugins/modules/gcp_cloudtasks_queue.py | 700 ++ .../plugins/modules/gcp_cloudtasks_queue_info.py | 315 + .../cloud/plugins/modules/gcp_compute_address.py | 512 ++ .../plugins/modules/gcp_compute_address_info.py | 289 + .../plugins/modules/gcp_compute_autoscaler.py | 929 +++ .../plugins/modules/gcp_compute_autoscaler_info.py | 374 + .../plugins/modules/gcp_compute_backend_bucket.py | 637 ++ .../modules/gcp_compute_backend_bucket_info.py | 306 + .../plugins/modules/gcp_compute_backend_service.py | 2268 ++++++ .../modules/gcp_compute_backend_service_info.py | 816 ++ .../cloud/plugins/modules/gcp_compute_disk.py | 872 +++ .../cloud/plugins/modules/gcp_compute_disk_info.py | 429 ++ .../modules/gcp_compute_external_vpn_gateway.py | 426 ++ .../gcp_compute_external_vpn_gateway_info.py | 229 + .../cloud/plugins/modules/gcp_compute_firewall.py | 830 +++ .../plugins/modules/gcp_compute_firewall_info.py | 376 + .../plugins/modules/gcp_compute_forwarding_rule.py 
| 739 ++ .../modules/gcp_compute_forwarding_rule_info.py | 368 + .../plugins/modules/gcp_compute_global_address.py | 462 ++ .../modules/gcp_compute_global_address_info.py | 259 + .../modules/gcp_compute_global_forwarding_rule.py | 780 ++ .../gcp_compute_global_forwarding_rule_info.py | 343 + .../plugins/modules/gcp_compute_health_check.py | 1406 ++++ .../modules/gcp_compute_health_check_info.py | 581 ++ .../modules/gcp_compute_http_health_check.py | 458 ++ .../modules/gcp_compute_http_health_check_info.py | 258 + .../modules/gcp_compute_https_health_check.py | 455 ++ .../modules/gcp_compute_https_health_check_info.py | 258 + .../cloud/plugins/modules/gcp_compute_image.py | 872 +++ .../plugins/modules/gcp_compute_image_info.py | 405 + .../cloud/plugins/modules/gcp_compute_instance.py | 1900 +++++ .../plugins/modules/gcp_compute_instance_group.py | 570 ++ .../modules/gcp_compute_instance_group_info.py | 267 + .../modules/gcp_compute_instance_group_manager.py | 630 ++ .../gcp_compute_instance_group_manager_info.py | 342 + .../plugins/modules/gcp_compute_instance_info.py | 685 ++ .../modules/gcp_compute_instance_template.py | 1677 +++++ .../modules/gcp_compute_instance_template_info.py | 614 ++ .../modules/gcp_compute_interconnect_attachment.py | 675 ++ .../gcp_compute_interconnect_attachment_info.py | 374 + .../cloud/plugins/modules/gcp_compute_network.py | 461 ++ .../modules/gcp_compute_network_endpoint_group.py | 454 ++ .../gcp_compute_network_endpoint_group_info.py | 246 + .../plugins/modules/gcp_compute_network_info.py | 256 + .../plugins/modules/gcp_compute_node_group.py | 567 ++ .../plugins/modules/gcp_compute_node_group_info.py | 273 + .../plugins/modules/gcp_compute_node_template.py | 519 ++ .../modules/gcp_compute_node_template_info.py | 274 + .../modules/gcp_compute_region_autoscaler.py | 903 +++ .../modules/gcp_compute_region_autoscaler_info.py | 374 + .../modules/gcp_compute_region_backend_service.py | 2144 ++++++ 
.../gcp_compute_region_backend_service_info.py | 841 +++ .../plugins/modules/gcp_compute_region_disk.py | 681 ++ .../modules/gcp_compute_region_disk_info.py | 347 + .../modules/gcp_compute_region_health_check.py | 1424 ++++ .../gcp_compute_region_health_check_info.py | 592 ++ .../gcp_compute_region_instance_group_manager.py | 679 ++ ...p_compute_region_instance_group_manager_info.py | 355 + .../gcp_compute_region_target_http_proxy.py | 436 ++ .../gcp_compute_region_target_http_proxy_info.py | 230 + .../gcp_compute_region_target_https_proxy.py | 504 ++ .../gcp_compute_region_target_https_proxy_info.py | 237 + .../plugins/modules/gcp_compute_region_url_map.py | 4984 +++++++++++++ .../modules/gcp_compute_region_url_map_info.py | 1658 +++++ .../plugins/modules/gcp_compute_reservation.py | 685 ++ .../modules/gcp_compute_reservation_info.py | 312 + .../plugins/modules/gcp_compute_resource_policy.py | 1023 +++ .../modules/gcp_compute_resource_policy_info.py | 400 + .../cloud/plugins/modules/gcp_compute_route.py | 537 ++ .../plugins/modules/gcp_compute_route_info.py | 271 + .../cloud/plugins/modules/gcp_compute_router.py | 559 ++ .../plugins/modules/gcp_compute_router_info.py | 276 + .../cloud/plugins/modules/gcp_compute_snapshot.py | 642 ++ .../plugins/modules/gcp_compute_snapshot_info.py | 314 + .../plugins/modules/gcp_compute_ssl_certificate.py | 396 + .../modules/gcp_compute_ssl_certificate_info.py | 225 + .../plugins/modules/gcp_compute_ssl_policy.py | 462 ++ .../plugins/modules/gcp_compute_ssl_policy_info.py | 261 + .../plugins/modules/gcp_compute_subnetwork.py | 586 ++ .../plugins/modules/gcp_compute_subnetwork_info.py | 280 + .../modules/gcp_compute_target_http_proxy.py | 435 ++ .../modules/gcp_compute_target_http_proxy_info.py | 225 + .../modules/gcp_compute_target_https_proxy.py | 559 ++ .../modules/gcp_compute_target_https_proxy_info.py | 247 + .../plugins/modules/gcp_compute_target_instance.py | 419 ++ .../modules/gcp_compute_target_instance_info.py | 232 + 
.../plugins/modules/gcp_compute_target_pool.py | 522 ++ .../modules/gcp_compute_target_pool_info.py | 278 + .../modules/gcp_compute_target_ssl_proxy.py | 531 ++ .../modules/gcp_compute_target_ssl_proxy_info.py | 237 + .../modules/gcp_compute_target_tcp_proxy.py | 453 ++ .../modules/gcp_compute_target_tcp_proxy_info.py | 229 + .../modules/gcp_compute_target_vpn_gateway.py | 402 + .../modules/gcp_compute_target_vpn_gateway_info.py | 240 + .../cloud/plugins/modules/gcp_compute_url_map.py | 7871 ++++++++++++++++++++ .../plugins/modules/gcp_compute_url_map_info.py | 2541 +++++++ .../plugins/modules/gcp_compute_vpn_tunnel.py | 612 ++ .../plugins/modules/gcp_compute_vpn_tunnel_info.py | 301 + .../cloud/plugins/modules/gcp_container_cluster.py | 2435 ++++++ .../plugins/modules/gcp_container_cluster_info.py | 895 +++ .../plugins/modules/gcp_container_node_pool.py | 1218 +++ .../modules/gcp_container_node_pool_info.py | 487 ++ .../cloud/plugins/modules/gcp_dns_managed_zone.py | 854 +++ .../plugins/modules/gcp_dns_managed_zone_info.py | 344 + .../plugins/modules/gcp_dns_resource_record_set.py | 498 ++ .../modules/gcp_dns_resource_record_set_info.py | 197 + .../plugins/modules/gcp_filestore_instance.py | 576 ++ .../plugins/modules/gcp_filestore_instance_info.py | 249 + .../google/cloud/plugins/modules/gcp_iam_role.py | 388 + .../cloud/plugins/modules/gcp_iam_role_info.py | 192 + .../plugins/modules/gcp_iam_service_account.py | 303 + .../modules/gcp_iam_service_account_info.py | 191 + .../plugins/modules/gcp_iam_service_account_key.py | 321 + .../cloud/plugins/modules/gcp_kms_crypto_key.py | 455 ++ .../plugins/modules/gcp_kms_crypto_key_info.py | 234 + .../cloud/plugins/modules/gcp_kms_key_ring.py | 295 + .../cloud/plugins/modules/gcp_kms_key_ring_info.py | 187 + .../cloud/plugins/modules/gcp_logging_metric.py | 823 ++ .../plugins/modules/gcp_logging_metric_info.py | 338 + .../cloud/plugins/modules/gcp_mlengine_model.py | 438 ++ .../plugins/modules/gcp_mlengine_model_info.py | 
205 + .../cloud/plugins/modules/gcp_mlengine_version.py | 634 ++ .../plugins/modules/gcp_mlengine_version_info.py | 297 + .../plugins/modules/gcp_pubsub_subscription.py | 875 +++ .../modules/gcp_pubsub_subscription_info.py | 369 + .../cloud/plugins/modules/gcp_pubsub_topic.py | 443 ++ .../cloud/plugins/modules/gcp_pubsub_topic_info.py | 216 + .../cloud/plugins/modules/gcp_redis_instance.py | 675 ++ .../plugins/modules/gcp_redis_instance_info.py | 327 + .../plugins/modules/gcp_resourcemanager_project.py | 421 ++ .../modules/gcp_resourcemanager_project_info.py | 229 + .../plugins/modules/gcp_runtimeconfig_config.py | 273 + .../modules/gcp_runtimeconfig_config_info.py | 171 + .../plugins/modules/gcp_runtimeconfig_variable.py | 321 + .../modules/gcp_runtimeconfig_variable_info.py | 187 + .../plugins/modules/gcp_serviceusage_service.py | 447 ++ .../modules/gcp_serviceusage_service_info.py | 214 + .../plugins/modules/gcp_sourcerepo_repository.py | 292 + .../modules/gcp_sourcerepo_repository_info.py | 178 + .../cloud/plugins/modules/gcp_spanner_database.py | 446 ++ .../plugins/modules/gcp_spanner_database_info.py | 204 + .../cloud/plugins/modules/gcp_spanner_instance.py | 421 ++ .../plugins/modules/gcp_spanner_instance_info.py | 201 + .../cloud/plugins/modules/gcp_sql_database.py | 359 + .../cloud/plugins/modules/gcp_sql_database_info.py | 194 + .../cloud/plugins/modules/gcp_sql_instance.py | 1388 ++++ .../cloud/plugins/modules/gcp_sql_instance_info.py | 555 ++ .../cloud/plugins/modules/gcp_sql_ssl_cert.py | 392 + .../google/cloud/plugins/modules/gcp_sql_user.py | 392 + .../cloud/plugins/modules/gcp_sql_user_info.py | 195 + .../cloud/plugins/modules/gcp_storage_bucket.py | 1428 ++++ .../modules/gcp_storage_bucket_access_control.py | 395 + .../modules/gcp_storage_default_object_acl.py | 413 + .../cloud/plugins/modules/gcp_storage_object.py | 305 + .../google/cloud/plugins/modules/gcp_tpu_node.py | 554 ++ .../cloud/plugins/modules/gcp_tpu_node_info.py | 263 + 170 files 
changed, 105758 insertions(+) create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_appengine_firewall_rule.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_appengine_firewall_rule_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_bigquery_dataset.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_bigquery_dataset_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_bigquery_table.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_bigquery_table_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_bigtable_instance.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_bigtable_instance_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_cloudbuild_trigger.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_cloudbuild_trigger_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_cloudfunctions_cloud_function.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_cloudfunctions_cloud_function_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_cloudscheduler_job.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_cloudscheduler_job_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_cloudtasks_queue.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_cloudtasks_queue_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_address.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_address_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_autoscaler.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_autoscaler_info.py create mode 100644 
ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_bucket.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_bucket_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_service.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_service_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_disk.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_disk_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_external_vpn_gateway.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_external_vpn_gateway_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_firewall.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_firewall_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_forwarding_rule.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_forwarding_rule_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_global_address.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_global_address_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_global_forwarding_rule.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_global_forwarding_rule_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_health_check.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_health_check_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_http_health_check.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_http_health_check_info.py create 
mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_https_health_check.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_https_health_check_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_image.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_image_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_instance.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group_manager.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group_manager_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_template.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_template_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_interconnect_attachment.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_interconnect_attachment_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_network.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_network_endpoint_group.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_network_endpoint_group_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_network_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_node_group.py create mode 100644 
ansible_collections/google/cloud/plugins/modules/gcp_compute_node_group_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_node_template.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_node_template_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_autoscaler.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_autoscaler_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_backend_service.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_backend_service_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_disk.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_disk_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_health_check.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_health_check_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_instance_group_manager.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_instance_group_manager_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_http_proxy.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_http_proxy_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_https_proxy.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_https_proxy_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_url_map.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_region_url_map_info.py create mode 
100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_reservation.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_reservation_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_resource_policy.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_resource_policy_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_route.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_route_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_router.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_router_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_snapshot.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_snapshot_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_certificate.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_certificate_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_policy.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_policy_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_subnetwork.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_subnetwork_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_http_proxy.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_http_proxy_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_https_proxy.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_https_proxy_info.py create mode 100644 
ansible_collections/google/cloud/plugins/modules/gcp_compute_target_instance.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_instance_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_pool.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_pool_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_ssl_proxy.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_ssl_proxy_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_tcp_proxy.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_tcp_proxy_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_vpn_gateway.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_target_vpn_gateway_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_url_map.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_url_map_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_vpn_tunnel.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_compute_vpn_tunnel_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_container_cluster.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_container_cluster_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_container_node_pool.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_container_node_pool_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_dns_managed_zone.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_dns_managed_zone_info.py create mode 100644 
ansible_collections/google/cloud/plugins/modules/gcp_dns_resource_record_set.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_dns_resource_record_set_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_filestore_instance.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_filestore_instance_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_iam_role.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_iam_role_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account_key.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_kms_crypto_key.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_kms_crypto_key_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_kms_key_ring.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_kms_key_ring_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_logging_metric.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_logging_metric_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_mlengine_model.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_mlengine_model_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_mlengine_version.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_mlengine_version_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_pubsub_subscription.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_pubsub_subscription_info.py create mode 100644 
ansible_collections/google/cloud/plugins/modules/gcp_pubsub_topic.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_pubsub_topic_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_redis_instance.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_redis_instance_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_resourcemanager_project.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_resourcemanager_project_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_runtimeconfig_config.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_runtimeconfig_config_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_runtimeconfig_variable.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_runtimeconfig_variable_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_serviceusage_service.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_serviceusage_service_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_sourcerepo_repository.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_sourcerepo_repository_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_spanner_database.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_spanner_database_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_spanner_instance.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_spanner_instance_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_sql_database.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_sql_database_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_sql_instance.py create 
mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_sql_instance_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_sql_ssl_cert.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_sql_user.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_sql_user_info.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_storage_bucket.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_storage_bucket_access_control.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_storage_default_object_acl.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_storage_object.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_tpu_node.py create mode 100644 ansible_collections/google/cloud/plugins/modules/gcp_tpu_node_info.py (limited to 'ansible_collections/google/cloud/plugins/modules') diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_appengine_firewall_rule.py b/ansible_collections/google/cloud/plugins/modules/gcp_appengine_firewall_rule.py new file mode 100644 index 000000000..508189fe0 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_appengine_firewall_rule.py @@ -0,0 +1,325 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_appengine_firewall_rule +description: +- A single firewall rule that is evaluated against incoming traffic and provides an + action to take on matched requests. +short_description: Creates a GCP FirewallRule +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional string description of this rule. + required: false + type: str + source_range: + description: + - IP address or range, defined using CIDR notation, of requests that this rule + applies to. + required: true + type: str + action: + description: + - The action to take if this rule matches. + - 'Some valid choices include: "UNSPECIFIED_ACTION", "ALLOW", "DENY"' + required: true + type: str + priority: + description: + - A positive integer that defines the order of rule evaluation. + - Rules with the lowest priority are evaluated first. + - A default rule at priority Int32.MaxValue matches all IPv4 and IPv6 traffic + when no previous rule matches. Only the action of this rule can be modified + by the user. + required: false + type: int + project: + description: + - The Google Cloud Platform project to use. 
+ type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.firewall.ingressRules)' +- 'Official Documentation: U(https://cloud.google.com/appengine/docs/standard/python/creating-firewalls#creating_firewall_rules)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a firewall rule + google.cloud.gcp_appengine_firewall_rule: + priority: 1000 + source_range: 10.0.0.0 + action: ALLOW + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +description: + description: + - An optional string description of this rule. + returned: success + type: str +sourceRange: + description: + - IP address or range, defined using CIDR notation, of requests that this rule applies + to. + returned: success + type: str +action: + description: + - The action to take if this rule matches. + returned: success + type: str +priority: + description: + - A positive integer that defines the order of rule evaluation. + - Rules with the lowest priority are evaluated first. + - A default rule at priority Int32.MaxValue matches all IPv4 and IPv6 traffic when + no previous rule matches. Only the action of this rule can be modified by the + user. + returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + source_range=dict(required=True, type='str'), + action=dict(required=True, type='str'), + priority=dict(type='int'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + 
+ fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'appengine') + return return_if_object(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, fetch): + auth = GcpSession(module, 'appengine') + params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))} + request = resource_to_request(module) + del request['name'] + return return_if_object(module, auth.patch(link, request, params=params)) + + +def updateMask(request, response): + update_mask = [] + if request.get('description') != response.get('description'): + update_mask.append('description') + if request.get('sourceRange') != response.get('sourceRange'): + update_mask.append('sourceRange') + if request.get('action') != response.get('action'): + update_mask.append('action') + if request.get('priority') != response.get('priority'): + update_mask.append('priority') + return ','.join(update_mask) + + +def delete(module, link): + auth = GcpSession(module, 'appengine') + return return_if_object(module, auth.delete(link)) + + +def resource_to_request(module): + request = {u'description': module.params.get('description'), u'sourceRange': module.params.get('source_range'), u'action': module.params.get('action')} + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'appengine') + return 
return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://appengine.googleapis.com/v1/apps/{project}/firewall/ingressRules/{priority}".format(**module.params) + + +def collection(module): + return "https://appengine.googleapis.com/v1/apps/{project}/firewall/ingressRules".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response):
+    # Keep only the user-settable fields from the API response for diffing.
+    return {u'description': response.get(u'description'), u'sourceRange': response.get(u'sourceRange'), u'action': response.get(u'action')}
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_appengine_firewall_rule_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_appengine_firewall_rule_info.py
new file mode 100644
index 000000000..639e16f85
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_appengine_firewall_rule_info.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_appengine_firewall_rule_info
+description:
+- Gather info for GCP FirewallRule
+short_description: Gather info for GCP FirewallRule
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used
+    type: list
+    elements: str
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variables values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+# NOTE(review): the example below uses the short module name while the sibling
+# gcp_appengine_firewall_rule module's example uses the FQCN
+# (google.cloud.gcp_appengine_firewall_rule) — presumably this should match;
+# confirm against the collection's documentation conventions.
+EXAMPLES = '''
+- name: get info on a firewall rule
+  gcp_appengine_firewall_rule_info:
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    description:
+      description:
+      - An optional string description of this rule.
+      returned: success
+      type: str
+    sourceRange:
+      description:
+      - IP address or range, defined using CIDR notation, of requests that this rule
+        applies to.
+      returned: success
+      type: str
+    action:
+      description:
+      - The action to take if this rule matches.
+      returned: success
+      type: str
+    priority:
+      description:
+      - A positive integer that defines the order of rule evaluation.
+      - Rules with the lowest priority are evaluated first.
+      - A default rule at priority Int32.MaxValue matches all IPv4 and IPv6 traffic
+        when no previous rule matches. Only the action of this rule can be modified
+        by the user.
+      returned: success
+      type: int
+'''
+
+################################################################################
+# Imports
+################################################################################
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
+import json
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    # Info module: no resource arguments beyond the shared auth/project options.
+    module = GcpModule(argument_spec=dict())
+
+    # Default to the broad cloud-platform scope when the playbook sets none.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
+
+    return_value = {'resources': fetch_list(module, collection(module))}
+    module.exit_json(**return_value)
+
+
+def collection(module):
+    # List URL for the project's App Engine firewall ingress rules.
+    return "https://appengine.googleapis.com/v1/apps/{project}/firewall/ingressRules".format(**module.params)
+
+
+def fetch_list(module, link):
+    # GcpSession.list handles pagination; items live under 'ingressRules'.
+    auth = GcpSession(module, 'appengine')
+    return auth.list(link, return_if_object, array_name='ingressRules')
+
+
+def return_if_object(module, response):
+    # Decode an API response into a dict, failing the module on HTTP or
+    # JSON errors; returns None for 404 and 204.
+    # If not found, return nothing.
+    if response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_bigquery_dataset.py b/ansible_collections/google/cloud/plugins/modules/gcp_bigquery_dataset.py
new file mode 100644
index 000000000..85ad27681
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_bigquery_dataset.py
@@ -0,0 +1,757 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_bigquery_dataset +description: +- Datasets allow you to organize and control access to your tables. +short_description: Creates a GCP Dataset +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Dataset name. + required: false + type: str + access: + description: + - An array of objects that define dataset access for one or more entities. + elements: dict + required: false + type: list + suboptions: + domain: + description: + - A domain to grant access to. Any users signed in with the domain specified + will be granted the specified access . + required: false + type: str + group_by_email: + description: + - An email address of a Google Group to grant access to. + required: false + type: str + role: + description: + - Describes the rights granted to the user specified by the other member of + the access object. Basic, predefined, and custom roles are supported. Predefined + roles that have equivalent basic roles are swapped by the API to their basic + counterparts. See [official docs](U(https://cloud.google.com/bigquery/docs/access-control)). 
+ required: false + type: str + special_group: + description: + - 'A special group to grant access to. Possible values include: * `projectOwners`: + Owners of the enclosing project.' + - "* `projectReaders`: Readers of the enclosing project." + - "* `projectWriters`: Writers of the enclosing project." + - "* `allAuthenticatedUsers`: All authenticated BigQuery users." + required: false + type: str + user_by_email: + description: + - 'An email address of a user to grant access to. For example: fred@example.com + .' + required: false + type: str + view: + description: + - A view from a different dataset to grant access to. Queries executed against + that view will have read access to tables in this dataset. The role field + is not required when this field is set. If that view is updated by any user, + access to the view needs to be granted again via an update operation. + required: false + type: dict + suboptions: + dataset_id: + description: + - The ID of the dataset containing this table. + required: true + type: str + project_id: + description: + - The ID of the project containing this table. + required: true + type: str + table_id: + description: + - The ID of the table. The ID must contain only letters (a-z, A-Z), numbers + (0-9), or underscores. The maximum length is 1,024 characters. + required: true + type: str + dataset_reference: + description: + - A reference that identifies the dataset. + required: true + type: dict + suboptions: + dataset_id: + description: + - A unique ID for this dataset, without the project name. The ID must contain + only letters (a-z, A-Z), numbers (0-9), or underscores. The maximum length + is 1,024 characters. + required: true + type: str + project_id: + description: + - The ID of the project containing this dataset. + required: false + type: str + default_table_expiration_ms: + description: + - The default lifetime of all tables in the dataset, in milliseconds. + - The minimum value is 3600000 milliseconds (one hour). 
+ - Once this property is set, all newly-created tables in the dataset will have + an `expirationTime` property set to the creation time plus the value in this + property, and changing the value will only affect new tables, not existing ones. + When the `expirationTime` for a given table is reached, that table will be deleted + automatically. + - If a table's `expirationTime` is modified or removed before the table expires, + or if you provide an explicit `expirationTime` when creating a table, that value + takes precedence over the default expiration time indicated by this property. + required: false + type: int + default_partition_expiration_ms: + description: + - The default partition expiration for all partitioned tables in the dataset, + in milliseconds. + - Once this property is set, all newly-created partitioned tables in the dataset + will have an `expirationMs` property in the `timePartitioning` settings set + to this value, and changing the value will only affect new tables, not existing + ones. The storage in a partition will have an expiration time of its partition + time plus this value. + - 'Setting this property overrides the use of `defaultTableExpirationMs` for partitioned + tables: only one of `defaultTableExpirationMs` and `defaultPartitionExpirationMs` + will be used for any new partitioned table. If you provide an explicit `timePartitioning.expirationMs` + when creating or updating a partitioned table, that value takes precedence over + the default partition expiration time indicated by this property.' + required: false + type: int + description: + description: + - A user-friendly description of the dataset. + required: false + type: str + friendly_name: + description: + - A descriptive name for the dataset. + required: false + type: str + labels: + description: + - The labels associated with this dataset. You can use these to organize and group + your datasets . 
+ required: false + type: dict + location: + description: + - The geographic location where the dataset should reside. + - See [official docs](U(https://cloud.google.com/bigquery/docs/dataset-locations)). + - There are two types of locations, regional or multi-regional. A regional location + is a specific geographic place, such as Tokyo, and a multi-regional location + is a large geographic area, such as the United States, that contains at least + two geographic places. + - The default value is multi-regional location `US`. + - Changing this forces a new resource to be created. + required: false + default: US + type: str + default_encryption_configuration: + description: + - The default encryption key for all tables in the dataset. Once this property + is set, all newly-created partitioned tables in the dataset will have encryption + key set to this value, unless table creation request (or query) overrides the + key. + required: false + type: dict + suboptions: + kms_key_name: + description: + - Describes the Cloud KMS encryption key that will be used to protect destination + BigQuery table. The BigQuery Service Account associated with your project + requires access to this encryption key. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets)' +- 'Datasets Intro: U(https://cloud.google.com/bigquery/docs/datasets-intro)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a dataset + google.cloud.gcp_bigquery_dataset: + name: my_example_dataset + dataset_reference: + dataset_id: my_example_dataset + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - Dataset name. + returned: success + type: str +access: + description: + - An array of objects that define dataset access for one or more entities. + returned: success + type: complex + contains: + domain: + description: + - A domain to grant access to. Any users signed in with the domain specified + will be granted the specified access . + returned: success + type: str + groupByEmail: + description: + - An email address of a Google Group to grant access to. 
+ returned: success + type: str + role: + description: + - Describes the rights granted to the user specified by the other member of + the access object. Basic, predefined, and custom roles are supported. Predefined + roles that have equivalent basic roles are swapped by the API to their basic + counterparts. See [official docs](U(https://cloud.google.com/bigquery/docs/access-control)). + returned: success + type: str + specialGroup: + description: + - 'A special group to grant access to. Possible values include: * `projectOwners`: + Owners of the enclosing project.' + - "* `projectReaders`: Readers of the enclosing project." + - "* `projectWriters`: Writers of the enclosing project." + - "* `allAuthenticatedUsers`: All authenticated BigQuery users." + returned: success + type: str + userByEmail: + description: + - 'An email address of a user to grant access to. For example: fred@example.com + .' + returned: success + type: str + view: + description: + - A view from a different dataset to grant access to. Queries executed against + that view will have read access to tables in this dataset. The role field + is not required when this field is set. If that view is updated by any user, + access to the view needs to be granted again via an update operation. + returned: success + type: complex + contains: + datasetId: + description: + - The ID of the dataset containing this table. + returned: success + type: str + projectId: + description: + - The ID of the project containing this table. + returned: success + type: str + tableId: + description: + - The ID of the table. The ID must contain only letters (a-z, A-Z), numbers + (0-9), or underscores. The maximum length is 1,024 characters. + returned: success + type: str +creationTime: + description: + - The time when this dataset was created, in milliseconds since the epoch. + returned: success + type: int +datasetReference: + description: + - A reference that identifies the dataset. 
+ returned: success + type: complex + contains: + datasetId: + description: + - A unique ID for this dataset, without the project name. The ID must contain + only letters (a-z, A-Z), numbers (0-9), or underscores. The maximum length + is 1,024 characters. + returned: success + type: str + projectId: + description: + - The ID of the project containing this dataset. + returned: success + type: str +defaultTableExpirationMs: + description: + - The default lifetime of all tables in the dataset, in milliseconds. + - The minimum value is 3600000 milliseconds (one hour). + - Once this property is set, all newly-created tables in the dataset will have an + `expirationTime` property set to the creation time plus the value in this property, + and changing the value will only affect new tables, not existing ones. When the + `expirationTime` for a given table is reached, that table will be deleted automatically. + - If a table's `expirationTime` is modified or removed before the table expires, + or if you provide an explicit `expirationTime` when creating a table, that value + takes precedence over the default expiration time indicated by this property. + returned: success + type: int +defaultPartitionExpirationMs: + description: + - The default partition expiration for all partitioned tables in the dataset, in + milliseconds. + - Once this property is set, all newly-created partitioned tables in the dataset + will have an `expirationMs` property in the `timePartitioning` settings set to + this value, and changing the value will only affect new tables, not existing ones. + The storage in a partition will have an expiration time of its partition time + plus this value. + - 'Setting this property overrides the use of `defaultTableExpirationMs` for partitioned + tables: only one of `defaultTableExpirationMs` and `defaultPartitionExpirationMs` + will be used for any new partitioned table. 
If you provide an explicit `timePartitioning.expirationMs` + when creating or updating a partitioned table, that value takes precedence over + the default partition expiration time indicated by this property.' + returned: success + type: int +description: + description: + - A user-friendly description of the dataset. + returned: success + type: str +etag: + description: + - A hash of the resource. + returned: success + type: str +friendlyName: + description: + - A descriptive name for the dataset. + returned: success + type: str +id: + description: + - The fully-qualified unique name of the dataset in the format projectId:datasetId. + The dataset name without the project name is given in the datasetId field . + returned: success + type: str +labels: + description: + - The labels associated with this dataset. You can use these to organize and group + your datasets . + returned: success + type: dict +lastModifiedTime: + description: + - The date when this dataset or any of its tables was last modified, in milliseconds + since the epoch. + returned: success + type: int +location: + description: + - The geographic location where the dataset should reside. + - See [official docs](U(https://cloud.google.com/bigquery/docs/dataset-locations)). + - There are two types of locations, regional or multi-regional. A regional location + is a specific geographic place, such as Tokyo, and a multi-regional location is + a large geographic area, such as the United States, that contains at least two + geographic places. + - The default value is multi-regional location `US`. + - Changing this forces a new resource to be created. + returned: success + type: str +defaultEncryptionConfiguration: + description: + - The default encryption key for all tables in the dataset. Once this property is + set, all newly-created partitioned tables in the dataset will have encryption + key set to this value, unless table creation request (or query) overrides the + key. 
+ returned: success + type: complex + contains: + kmsKeyName: + description: + - Describes the Cloud KMS encryption key that will be used to protect destination + BigQuery table. The BigQuery Service Account associated with your project + requires access to this encryption key. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(type='str'), + access=dict( + type='list', + elements='dict', + options=dict( + domain=dict(type='str'), + group_by_email=dict(type='str'), + role=dict(type='str'), + special_group=dict(type='str'), + user_by_email=dict(type='str'), + view=dict( + type='dict', + options=dict( + dataset_id=dict(required=True, type='str'), project_id=dict(required=True, type='str'), table_id=dict(required=True, type='str') + ), + ), + ), + ), + dataset_reference=dict(required=True, type='dict', options=dict(dataset_id=dict(required=True, type='str'), project_id=dict(type='str'))), + default_table_expiration_ms=dict(type='int'), + default_partition_expiration_ms=dict(type='int'), + description=dict(type='str'), + friendly_name=dict(type='str'), + labels=dict(type='dict'), + location=dict(default='US', type='str'), + default_encryption_configuration=dict(type='dict', options=dict(kms_key_name=dict(required=True, type='str'))), + ) + ) + + if not 
module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery'] + + state = module.params['state'] + kind = 'bigquery#dataset' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'bigquery') + return return_if_object(module, auth.post(link, resource_to_request(module)), kind) + + +def update(module, link, kind): + auth = GcpSession(module, 'bigquery') + return return_if_object(module, auth.put(link, resource_to_request(module)), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'bigquery') + return return_if_object(module, auth.delete(link), kind) + + +def resource_to_request(module): + request = { + u'kind': 'bigquery#dataset', + u'name': module.params.get('name'), + u'access': DatasetAccessArray(module.params.get('access', []), module).to_request(), + u'datasetReference': DatasetDatasetreference(module.params.get('dataset_reference', {}), module).to_request(), + u'defaultTableExpirationMs': module.params.get('default_table_expiration_ms'), + u'defaultPartitionExpirationMs': module.params.get('default_partition_expiration_ms'), + u'description': module.params.get('description'), + u'friendlyName': module.params.get('friendly_name'), + u'labels': module.params.get('labels'), + u'location': module.params.get('location'), + u'defaultEncryptionConfiguration': DatasetDefaultencryptionconfiguration( + module.params.get('default_encryption_configuration', {}), module + 
).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'bigquery') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets/{name}".format(**module.params) + + +def collection(module): + return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
def response_to_hash(module, response):
    """Project the API response down to the properties this module manages.

    Output-only fields are later stripped by the comparison in is_different;
    this just normalizes the response into the same key space as the request.
    """
    return {
        'name': response.get('name'),
        'access': DatasetAccessArray(response.get('access', []), module).from_response(),
        'creationTime': response.get('creationTime'),
        # NOTE(review): the generated code rebuilds datasetReference from the
        # module params rather than the response — presumably because the API
        # echoes the reference back verbatim; confirm against the generator.
        'datasetReference': DatasetDatasetreference(module.params.get('dataset_reference', {}), module).to_request(),
        'defaultTableExpirationMs': response.get('defaultTableExpirationMs'),
        'defaultPartitionExpirationMs': response.get('defaultPartitionExpirationMs'),
        'description': response.get('description'),
        'etag': response.get('etag'),
        'friendlyName': response.get('friendlyName'),
        'id': response.get('id'),
        'labels': response.get('labels'),
        'lastModifiedTime': response.get('lastModifiedTime'),
        'location': response.get('location'),
        'defaultEncryptionConfiguration': DatasetDefaultencryptionconfiguration(response.get('defaultEncryptionConfiguration', {}), module).from_response(),
    }


class DatasetAccessArray(object):
    """Converts the dataset ``access`` list between Ansible params (snake_case)
    and the API wire format (camelCase)."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, item):
        # Ansible snake_case keys -> API camelCase keys.
        return remove_nones_from_dict(
            {
                'domain': item.get('domain'),
                'groupByEmail': item.get('group_by_email'),
                'role': item.get('role'),
                'specialGroup': item.get('special_group'),
                'userByEmail': item.get('user_by_email'),
                'view': DatasetView(item.get('view', {}), self.module).to_request(),
            }
        )

    def _response_from_item(self, item):
        # API responses are already camelCase; just prune the Nones.
        return remove_nones_from_dict(
            {
                'domain': item.get('domain'),
                'groupByEmail': item.get('groupByEmail'),
                'role': item.get('role'),
                'specialGroup': item.get('specialGroup'),
                'userByEmail': item.get('userByEmail'),
                'view': DatasetView(item.get('view', {}), self.module).from_response(),
            }
        )


class DatasetView(object):
    """Converts a view reference (dataset/project/table ids) to/from API form."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {'datasetId': self.request.get('dataset_id'), 'projectId': self.request.get('project_id'), 'tableId': self.request.get('table_id')}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {'datasetId': self.request.get('datasetId'), 'projectId': self.request.get('projectId'), 'tableId': self.request.get('tableId')}
        )


class DatasetDatasetreference(object):
    """Converts the datasetReference sub-object to/from API form."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({'datasetId': self.request.get('dataset_id'), 'projectId': self.request.get('project_id')})

    def from_response(self):
        # NOTE(review): generated code reads these from module params, not from
        # the stored response — kept as-is to match generator semantics.
        return remove_nones_from_dict({'datasetId': self.module.params.get('dataset_id'), 'projectId': self.module.params.get('project_id')})


class DatasetDefaultencryptionconfiguration(object):
    """Converts the defaultEncryptionConfiguration sub-object to/from API form."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({'kmsKeyName': self.request.get('kms_key_name')})

    def from_response(self):
        return remove_nones_from_dict({'kmsKeyName': self.request.get('kmsKeyName')})


if __name__ == '__main__':
    main()
Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_bigquery_dataset_info +description: +- Gather info for GCP Dataset +short_description: Gather info for GCP Dataset +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a dataset + gcp_bigquery_dataset_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - Dataset name. + returned: success + type: str + access: + description: + - An array of objects that define dataset access for one or more entities. + returned: success + type: complex + contains: + domain: + description: + - A domain to grant access to. Any users signed in with the domain specified + will be granted the specified access . + returned: success + type: str + groupByEmail: + description: + - An email address of a Google Group to grant access to. 
+ returned: success + type: str + role: + description: + - Describes the rights granted to the user specified by the other member + of the access object. Basic, predefined, and custom roles are supported. + Predefined roles that have equivalent basic roles are swapped by the API + to their basic counterparts. See [official docs](U(https://cloud.google.com/bigquery/docs/access-control)). + returned: success + type: str + specialGroup: + description: + - 'A special group to grant access to. Possible values include: * `projectOwners`: + Owners of the enclosing project.' + - "* `projectReaders`: Readers of the enclosing project." + - "* `projectWriters`: Writers of the enclosing project." + - "* `allAuthenticatedUsers`: All authenticated BigQuery users." + returned: success + type: str + userByEmail: + description: + - 'An email address of a user to grant access to. For example: fred@example.com + .' + returned: success + type: str + view: + description: + - A view from a different dataset to grant access to. Queries executed against + that view will have read access to tables in this dataset. The role field + is not required when this field is set. If that view is updated by any + user, access to the view needs to be granted again via an update operation. + returned: success + type: complex + contains: + datasetId: + description: + - The ID of the dataset containing this table. + returned: success + type: str + projectId: + description: + - The ID of the project containing this table. + returned: success + type: str + tableId: + description: + - The ID of the table. The ID must contain only letters (a-z, A-Z), + numbers (0-9), or underscores. The maximum length is 1,024 characters. + returned: success + type: str + creationTime: + description: + - The time when this dataset was created, in milliseconds since the epoch. + returned: success + type: int + datasetReference: + description: + - A reference that identifies the dataset. 
+ returned: success + type: complex + contains: + datasetId: + description: + - A unique ID for this dataset, without the project name. The ID must contain + only letters (a-z, A-Z), numbers (0-9), or underscores. The maximum length + is 1,024 characters. + returned: success + type: str + projectId: + description: + - The ID of the project containing this dataset. + returned: success + type: str + defaultTableExpirationMs: + description: + - The default lifetime of all tables in the dataset, in milliseconds. + - The minimum value is 3600000 milliseconds (one hour). + - Once this property is set, all newly-created tables in the dataset will have + an `expirationTime` property set to the creation time plus the value in this + property, and changing the value will only affect new tables, not existing + ones. When the `expirationTime` for a given table is reached, that table will + be deleted automatically. + - If a table's `expirationTime` is modified or removed before the table expires, + or if you provide an explicit `expirationTime` when creating a table, that + value takes precedence over the default expiration time indicated by this + property. + returned: success + type: int + defaultPartitionExpirationMs: + description: + - The default partition expiration for all partitioned tables in the dataset, + in milliseconds. + - Once this property is set, all newly-created partitioned tables in the dataset + will have an `expirationMs` property in the `timePartitioning` settings set + to this value, and changing the value will only affect new tables, not existing + ones. The storage in a partition will have an expiration time of its partition + time plus this value. + - 'Setting this property overrides the use of `defaultTableExpirationMs` for + partitioned tables: only one of `defaultTableExpirationMs` and `defaultPartitionExpirationMs` + will be used for any new partitioned table. 
If you provide an explicit `timePartitioning.expirationMs` + when creating or updating a partitioned table, that value takes precedence + over the default partition expiration time indicated by this property.' + returned: success + type: int + description: + description: + - A user-friendly description of the dataset. + returned: success + type: str + etag: + description: + - A hash of the resource. + returned: success + type: str + friendlyName: + description: + - A descriptive name for the dataset. + returned: success + type: str + id: + description: + - The fully-qualified unique name of the dataset in the format projectId:datasetId. + The dataset name without the project name is given in the datasetId field + . + returned: success + type: str + labels: + description: + - The labels associated with this dataset. You can use these to organize and + group your datasets . + returned: success + type: dict + lastModifiedTime: + description: + - The date when this dataset or any of its tables was last modified, in milliseconds + since the epoch. + returned: success + type: int + location: + description: + - The geographic location where the dataset should reside. + - See [official docs](U(https://cloud.google.com/bigquery/docs/dataset-locations)). + - There are two types of locations, regional or multi-regional. A regional location + is a specific geographic place, such as Tokyo, and a multi-regional location + is a large geographic area, such as the United States, that contains at least + two geographic places. + - The default value is multi-regional location `US`. + - Changing this forces a new resource to be created. + returned: success + type: str + defaultEncryptionConfiguration: + description: + - The default encryption key for all tables in the dataset. Once this property + is set, all newly-created partitioned tables in the dataset will have encryption + key set to this value, unless table creation request (or query) overrides + the key. 
def main():
    """Entry point: list every BigQuery dataset in the configured project."""
    module = GcpModule(argument_spec=dict())

    # Fall back to the BigQuery scope when the user supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery']

    module.exit_json(**{'resources': fetch_list(module, collection(module))})


def collection(module):
    """URL of the datasets collection for the configured project."""
    return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets".format(**module.params)


def fetch_list(module, link):
    """Page through the list endpoint, accumulating the 'datasets' array."""
    session = GcpSession(module, 'bigquery')
    return session.list(link, return_if_object, array_name='datasets')


def return_if_object(module, response):
    """Decode a list-page response into a dict.

    Returns None for 404/204; calls ``module.fail_json`` on invalid JSON or
    when the API reports errors.
    """
    # Nothing found: nothing to return.
    if response.status_code == 404:
        return None

    # Empty body: nothing to decode.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


if __name__ == "__main__":
    main()
+short_description: Creates a GCP Table +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + table_reference: + description: + - Reference describing the ID of this table. + required: false + type: dict + suboptions: + dataset_id: + description: + - The ID of the dataset containing this table. + required: false + type: str + project_id: + description: + - The ID of the project containing this table. + required: false + type: str + table_id: + description: + - The ID of the the table. + required: false + type: str + clustering: + description: + - One or more fields on which data should be clustered. Only top-level, non-repeated, + simple-type fields are supported. When you cluster a table using multiple columns, + the order of columns you specify is important. The order of the specified columns + determines the sort order of the data. + elements: str + required: false + type: list + description: + description: + - A user-friendly description of the dataset. + required: false + type: str + friendly_name: + description: + - A descriptive name for this table. + required: false + type: str + labels: + description: + - The labels associated with this dataset. You can use these to organize and group + your datasets . + required: false + type: dict + name: + description: + - Name of the table. + required: false + type: str + num_rows: + description: + - The number of rows of data in this table, excluding any data in the streaming + buffer. + required: false + type: int + view: + description: + - The view definition. + required: false + type: dict + suboptions: + use_legacy_sql: + description: + - Specifies whether to use BigQuery's legacy SQL for this view . 
+ required: false + type: bool + user_defined_function_resources: + description: + - Describes user-defined function resources used in the query. + elements: dict + required: false + type: list + suboptions: + inline_code: + description: + - An inline resource that contains code for a user-defined function (UDF). + Providing a inline code resource is equivalent to providing a URI for + a file containing the same code. + required: false + type: str + resource_uri: + description: + - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). + required: false + type: str + time_partitioning: + description: + - If specified, configures time-based partitioning for this table. + required: false + type: dict + suboptions: + expiration_ms: + description: + - Number of milliseconds for which to keep the storage for a partition. + required: false + type: int + field: + description: + - If not set, the table is partitioned by pseudo column, referenced via either + '_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If + field is specified, the table is instead partitioned by this field. The + field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE + or REQUIRED. + required: false + type: str + type: + description: + - The only type supported is DAY, which will generate one partition per day. + - 'Some valid choices include: "DAY"' + required: false + type: str + schema: + description: + - Describes the schema of this table. + required: false + type: dict + suboptions: + fields: + description: + - Describes the fields in a table. + elements: dict + required: false + type: list + suboptions: + description: + description: + - The field description. The maximum length is 1,024 characters. + required: false + type: str + fields: + description: + - Describes the nested schema fields if the type property is set to RECORD. + elements: str + required: false + type: list + mode: + description: + - The field mode. 
+ - 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"' + required: false + type: str + name: + description: + - The field name. + required: false + type: str + type: + description: + - The field data type. + - 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT", + "TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"' + required: false + type: str + encryption_configuration: + description: + - Custom encryption configuration. + required: false + type: dict + suboptions: + kms_key_name: + description: + - Describes the Cloud KMS encryption key that will be used to protect destination + BigQuery table. The BigQuery Service Account associated with your project + requires access to this encryption key. + required: false + type: str + expiration_time: + description: + - The time when this table expires, in milliseconds since the epoch. If not present, + the table will persist indefinitely. + required: false + type: int + external_data_configuration: + description: + - Describes the data format, location, and other properties of a table stored + outside of BigQuery. By defining these properties, the data source can then + be queried as if it were a standard BigQuery table. + required: false + type: dict + suboptions: + autodetect: + description: + - Try to detect schema and format options automatically. Any option specified + explicitly will be honored. + required: false + type: bool + compression: + description: + - The compression type of the data source. + - 'Some valid choices include: "GZIP", "NONE"' + required: false + type: str + ignore_unknown_values: + description: + - Indicates if BigQuery should allow extra values that are not represented + in the table schema . + required: false + type: bool + max_bad_records: + description: + - The maximum number of bad records that BigQuery can ignore when reading + data . + required: false + type: int + source_format: + description: + - The data format. 
+ - 'Some valid choices include: "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", + "AVRO", "DATASTORE_BACKUP", "BIGTABLE", "ORC"' + required: false + type: str + source_uris: + description: + - The fully-qualified URIs that point to your data in Google Cloud. + - 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard + character and it must come after the ''bucket'' name. Size limits related + to load jobs apply to external data sources. For Google Cloud Bigtable URIs: + Exactly one URI can be specified and it has be a fully specified and valid + HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore + backups, exactly one URI can be specified. Also, the ''*'' wildcard character + is not allowed.' + elements: str + required: false + type: list + schema: + description: + - The schema for the data. Schema is required for CSV and JSON formats. + required: false + type: dict + suboptions: + fields: + description: + - Describes the fields in a table. + elements: dict + required: false + type: list + suboptions: + description: + description: + - The field description. + required: false + type: str + fields: + description: + - Describes the nested schema fields if the type property is set to + RECORD . + elements: str + required: false + type: list + mode: + description: + - Field mode. + - 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"' + required: false + type: str + name: + description: + - Field name. + required: false + type: str + type: + description: + - Field data type. + - 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT", + "TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"' + required: false + type: str + google_sheets_options: + description: + - Additional options if sourceFormat is set to GOOGLE_SHEETS. + required: false + type: dict + suboptions: + skip_leading_rows: + description: + - The number of rows at the top of a Google Sheet that BigQuery will skip + when reading the data. 
+ required: false + type: int + csv_options: + description: + - Additional properties to set if sourceFormat is set to CSV. + required: false + type: dict + suboptions: + allow_jagged_rows: + description: + - Indicates if BigQuery should accept rows that are missing trailing optional + columns . + required: false + type: bool + allow_quoted_newlines: + description: + - Indicates if BigQuery should allow quoted data sections that contain + newline characters in a CSV file . + required: false + type: bool + encoding: + description: + - The character encoding of the data. + - 'Some valid choices include: "UTF-8", "ISO-8859-1"' + required: false + type: str + field_delimiter: + description: + - The separator for fields in a CSV file. + required: false + type: str + quote: + description: + - The value that is used to quote data sections in a CSV file. + required: false + type: str + skip_leading_rows: + description: + - The number of rows at the top of a CSV file that BigQuery will skip + when reading the data. + required: false + type: int + bigtable_options: + description: + - Additional options if sourceFormat is set to BIGTABLE. + required: false + type: dict + suboptions: + ignore_unspecified_column_families: + description: + - If field is true, then the column families that are not specified in + columnFamilies list are not exposed in the table schema . + required: false + type: bool + read_rowkey_as_string: + description: + - If field is true, then the rowkey column families will be read and converted + to string. + required: false + type: bool + column_families: + description: + - List of column families to expose in the table schema along with their + types. + elements: dict + required: false + type: list + suboptions: + columns: + description: + - Lists of columns that should be exposed as individual fields as + opposed to a list of (column name, value) pairs. 
+ elements: dict + required: false + type: list + suboptions: + encoding: + description: + - The encoding of the values when the type is not STRING. + - 'Some valid choices include: "TEXT", "BINARY"' + required: false + type: str + field_name: + description: + - If the qualifier is not a valid BigQuery field identifier, a + valid identifier must be provided as the column field name and + is used as field name in queries. + required: false + type: str + only_read_latest: + description: + - If this is set, only the latest version of value in this column + are exposed . + required: false + type: bool + qualifier_string: + description: + - Qualifier of the column. + required: true + type: str + type: + description: + - The type to convert the value in cells of this column. + - 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT", + "BOOLEAN"' + required: false + type: str + encoding: + description: + - The encoding of the values when the type is not STRING. + - 'Some valid choices include: "TEXT", "BINARY"' + required: false + type: str + family_id: + description: + - Identifier of the column family. + required: false + type: str + only_read_latest: + description: + - If this is set only the latest version of value are exposed for + all columns in this column family . + required: false + type: bool + type: + description: + - The type to convert the value in cells of this column family. + - 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT", + "BOOLEAN"' + required: false + type: str + dataset: + description: + - Name of the dataset. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a dataset + google.cloud.gcp_bigquery_dataset: + name: example_dataset + dataset_reference: + dataset_id: example_dataset + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: dataset + +- name: create a table + google.cloud.gcp_bigquery_table: + name: example_table + dataset: example_dataset + table_reference: + dataset_id: example_dataset + project_id: test_project + table_id: example_table + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +tableReference: + description: + - Reference describing the ID of this table. + returned: success + type: complex + contains: + datasetId: + description: + - The ID of the dataset containing this table. + returned: success + type: str + projectId: + description: + - The ID of the project containing this table. + returned: success + type: str + tableId: + description: + - The ID of the the table. 
+ returned: success + type: str +clustering: + description: + - One or more fields on which data should be clustered. Only top-level, non-repeated, + simple-type fields are supported. When you cluster a table using multiple columns, + the order of columns you specify is important. The order of the specified columns + determines the sort order of the data. + returned: success + type: list +creationTime: + description: + - The time when this dataset was created, in milliseconds since the epoch. + returned: success + type: int +description: + description: + - A user-friendly description of the dataset. + returned: success + type: str +friendlyName: + description: + - A descriptive name for this table. + returned: success + type: str +id: + description: + - An opaque ID uniquely identifying the table. + returned: success + type: str +labels: + description: + - The labels associated with this dataset. You can use these to organize and group + your datasets . + returned: success + type: dict +lastModifiedTime: + description: + - The time when this table was last modified, in milliseconds since the epoch. + returned: success + type: int +location: + description: + - The geographic location where the table resides. This value is inherited from + the dataset. + returned: success + type: str +name: + description: + - Name of the table. + returned: success + type: str +numBytes: + description: + - The size of this table in bytes, excluding any data in the streaming buffer. + returned: success + type: int +numLongTermBytes: + description: + - The number of bytes in the table that are considered "long-term storage". + returned: success + type: int +numRows: + description: + - The number of rows of data in this table, excluding any data in the streaming + buffer. + returned: success + type: int +requirePartitionFilter: + description: + - If set to true, queries over this table require a partition filter that can be + used for partition elimination to be specified. 
+ returned: success + type: bool +type: + description: + - Describes the table type. + returned: success + type: str +view: + description: + - The view definition. + returned: success + type: complex + contains: + useLegacySql: + description: + - Specifies whether to use BigQuery's legacy SQL for this view . + returned: success + type: bool + userDefinedFunctionResources: + description: + - Describes user-defined function resources used in the query. + returned: success + type: complex + contains: + inlineCode: + description: + - An inline resource that contains code for a user-defined function (UDF). + Providing a inline code resource is equivalent to providing a URI for + a file containing the same code. + returned: success + type: str + resourceUri: + description: + - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). + returned: success + type: str +timePartitioning: + description: + - If specified, configures time-based partitioning for this table. + returned: success + type: complex + contains: + expirationMs: + description: + - Number of milliseconds for which to keep the storage for a partition. + returned: success + type: int + field: + description: + - If not set, the table is partitioned by pseudo column, referenced via either + '_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If field + is specified, the table is instead partitioned by this field. The field must + be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. + returned: success + type: str + type: + description: + - The only type supported is DAY, which will generate one partition per day. + returned: success + type: str +streamingBuffer: + description: + - Contains information regarding this table's streaming buffer, if one is present. + This field will be absent if the table is not being streamed to or if there is + no data in the streaming buffer. 
+ returned: success + type: complex + contains: + estimatedBytes: + description: + - A lower-bound estimate of the number of bytes currently in the streaming buffer. + returned: success + type: int + estimatedRows: + description: + - A lower-bound estimate of the number of rows currently in the streaming buffer. + returned: success + type: int + oldestEntryTime: + description: + - Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds + since the epoch, if the streaming buffer is available. + returned: success + type: int +schema: + description: + - Describes the schema of this table. + returned: success + type: complex + contains: + fields: + description: + - Describes the fields in a table. + returned: success + type: complex + contains: + description: + description: + - The field description. The maximum length is 1,024 characters. + returned: success + type: str + fields: + description: + - Describes the nested schema fields if the type property is set to RECORD. + returned: success + type: list + mode: + description: + - The field mode. + returned: success + type: str + name: + description: + - The field name. + returned: success + type: str + type: + description: + - The field data type. + returned: success + type: str +encryptionConfiguration: + description: + - Custom encryption configuration. + returned: success + type: complex + contains: + kmsKeyName: + description: + - Describes the Cloud KMS encryption key that will be used to protect destination + BigQuery table. The BigQuery Service Account associated with your project + requires access to this encryption key. + returned: success + type: str +expirationTime: + description: + - The time when this table expires, in milliseconds since the epoch. If not present, + the table will persist indefinitely. + returned: success + type: int +externalDataConfiguration: + description: + - Describes the data format, location, and other properties of a table stored outside + of BigQuery. 
By defining these properties, the data source can then be queried + as if it were a standard BigQuery table. + returned: success + type: complex + contains: + autodetect: + description: + - Try to detect schema and format options automatically. Any option specified + explicitly will be honored. + returned: success + type: bool + compression: + description: + - The compression type of the data source. + returned: success + type: str + ignoreUnknownValues: + description: + - Indicates if BigQuery should allow extra values that are not represented in + the table schema . + returned: success + type: bool + maxBadRecords: + description: + - The maximum number of bad records that BigQuery can ignore when reading data + . + returned: success + type: int + sourceFormat: + description: + - The data format. + returned: success + type: str + sourceUris: + description: + - The fully-qualified URIs that point to your data in Google Cloud. + - 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard character + and it must come after the ''bucket'' name. Size limits related to load jobs + apply to external data sources. For Google Cloud Bigtable URIs: Exactly one + URI can be specified and it has be a fully specified and valid HTTPS URL for + a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly + one URI can be specified. Also, the ''*'' wildcard character is not allowed.' + returned: success + type: list + schema: + description: + - The schema for the data. Schema is required for CSV and JSON formats. + returned: success + type: complex + contains: + fields: + description: + - Describes the fields in a table. + returned: success + type: complex + contains: + description: + description: + - The field description. + returned: success + type: str + fields: + description: + - Describes the nested schema fields if the type property is set to + RECORD . + returned: success + type: list + mode: + description: + - Field mode. 
+ returned: success + type: str + name: + description: + - Field name. + returned: success + type: str + type: + description: + - Field data type. + returned: success + type: str + googleSheetsOptions: + description: + - Additional options if sourceFormat is set to GOOGLE_SHEETS. + returned: success + type: complex + contains: + skipLeadingRows: + description: + - The number of rows at the top of a Google Sheet that BigQuery will skip + when reading the data. + returned: success + type: int + csvOptions: + description: + - Additional properties to set if sourceFormat is set to CSV. + returned: success + type: complex + contains: + allowJaggedRows: + description: + - Indicates if BigQuery should accept rows that are missing trailing optional + columns . + returned: success + type: bool + allowQuotedNewlines: + description: + - Indicates if BigQuery should allow quoted data sections that contain newline + characters in a CSV file . + returned: success + type: bool + encoding: + description: + - The character encoding of the data. + returned: success + type: str + fieldDelimiter: + description: + - The separator for fields in a CSV file. + returned: success + type: str + quote: + description: + - The value that is used to quote data sections in a CSV file. + returned: success + type: str + skipLeadingRows: + description: + - The number of rows at the top of a CSV file that BigQuery will skip when + reading the data. + returned: success + type: int + bigtableOptions: + description: + - Additional options if sourceFormat is set to BIGTABLE. + returned: success + type: complex + contains: + ignoreUnspecifiedColumnFamilies: + description: + - If field is true, then the column families that are not specified in columnFamilies + list are not exposed in the table schema . + returned: success + type: bool + readRowkeyAsString: + description: + - If field is true, then the rowkey column families will be read and converted + to string. 
+ returned: success + type: bool + columnFamilies: + description: + - List of column families to expose in the table schema along with their + types. + returned: success + type: complex + contains: + columns: + description: + - Lists of columns that should be exposed as individual fields as opposed + to a list of (column name, value) pairs. + returned: success + type: complex + contains: + encoding: + description: + - The encoding of the values when the type is not STRING. + returned: success + type: str + fieldName: + description: + - If the qualifier is not a valid BigQuery field identifier, a valid + identifier must be provided as the column field name and is used + as field name in queries. + returned: success + type: str + onlyReadLatest: + description: + - If this is set, only the latest version of value in this column + are exposed . + returned: success + type: bool + qualifierString: + description: + - Qualifier of the column. + returned: success + type: str + type: + description: + - The type to convert the value in cells of this column. + returned: success + type: str + encoding: + description: + - The encoding of the values when the type is not STRING. + returned: success + type: str + familyId: + description: + - Identifier of the column family. + returned: success + type: str + onlyReadLatest: + description: + - If this is set only the latest version of value are exposed for all + columns in this column family . + returned: success + type: bool + type: + description: + - The type to convert the value in cells of this column family. + returned: success + type: str +dataset: + description: + - Name of the dataset. 
  returned: success
  type: str
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
    navigate_hash,
    GcpSession,
    GcpModule,
    GcpRequest,
    remove_nones_from_dict,
    replace_resource_dict,
)
import json

################################################################################
# Main
################################################################################


def main():
    """Module entry point.

    Declares the argument spec, then reconciles the remote BigQuery table
    with the requested ``state``: create it, update it when different,
    delete it, or do nothing — and exits reporting the resulting resource
    plus a ``changed`` flag.
    """

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            table_reference=dict(type='dict', options=dict(dataset_id=dict(type='str'), project_id=dict(type='str'), table_id=dict(type='str'))),
            clustering=dict(type='list', elements='str'),
            description=dict(type='str'),
            friendly_name=dict(type='str'),
            labels=dict(type='dict'),
            name=dict(type='str'),
            num_rows=dict(type='int'),
            view=dict(
                type='dict',
                options=dict(
                    use_legacy_sql=dict(type='bool'),
                    user_defined_function_resources=dict(
                        type='list', elements='dict', options=dict(inline_code=dict(type='str'), resource_uri=dict(type='str'))
                    ),
                ),
            ),
            time_partitioning=dict(type='dict', options=dict(expiration_ms=dict(type='int'), field=dict(type='str'), type=dict(type='str'))),
            schema=dict(
                type='dict',
                options=dict(
                    fields=dict(
                        type='list',
                        elements='dict',
                        options=dict(
                            description=dict(type='str'),
                            fields=dict(type='list', elements='str'),
                            mode=dict(type='str'),
                            name=dict(type='str'),
                            type=dict(type='str'),
                        ),
                    )
                ),
            ),
            encryption_configuration=dict(type='dict', options=dict(kms_key_name=dict(type='str'))),
            expiration_time=dict(type='int'),
            external_data_configuration=dict(
                type='dict',
                options=dict(
                    autodetect=dict(type='bool'),
                    compression=dict(type='str'),
                    ignore_unknown_values=dict(type='bool'),
                    max_bad_records=dict(default=0, type='int'),
                    source_format=dict(type='str'),
                    source_uris=dict(type='list', elements='str'),
                    schema=dict(
                        type='dict',
                        options=dict(
                            fields=dict(
                                type='list',
                                elements='dict',
                                options=dict(
                                    description=dict(type='str'),
                                    fields=dict(type='list', elements='str'),
                                    mode=dict(type='str'),
                                    name=dict(type='str'),
                                    type=dict(type='str'),
                                ),
                            )
                        ),
                    ),
                    google_sheets_options=dict(type='dict', options=dict(skip_leading_rows=dict(default=0, type='int'))),
                    csv_options=dict(
                        type='dict',
                        options=dict(
                            allow_jagged_rows=dict(type='bool'),
                            allow_quoted_newlines=dict(type='bool'),
                            encoding=dict(type='str'),
                            field_delimiter=dict(type='str'),
                            quote=dict(type='str'),
                            skip_leading_rows=dict(default=0, type='int'),
                        ),
                    ),
                    bigtable_options=dict(
                        type='dict',
                        options=dict(
                            ignore_unspecified_column_families=dict(type='bool'),
                            read_rowkey_as_string=dict(type='bool'),
                            column_families=dict(
                                type='list',
                                elements='dict',
                                options=dict(
                                    columns=dict(
                                        type='list',
                                        elements='dict',
                                        options=dict(
                                            encoding=dict(type='str'),
                                            field_name=dict(type='str'),
                                            only_read_latest=dict(type='bool'),
                                            qualifier_string=dict(required=True, type='str'),
                                            type=dict(type='str'),
                                        ),
                                    ),
                                    encoding=dict(type='str'),
                                    family_id=dict(type='str'),
                                    only_read_latest=dict(type='bool'),
                                    type=dict(type='str'),
                                ),
                            ),
                        ),
                    ),
                ),
            ),
            dataset=dict(type='str'),
        )
    )

    # Default to the BigQuery OAuth scope when the user supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery']

    state = module.params['state']
    kind = 'bigquery#table'

    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            # Resource exists: update only when the desired request differs
            # from the remote state, then re-fetch to report fresh values.
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            # state == 'absent' and resource already missing: nothing to do.
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link, kind):
    """POST the desired resource body to the tables collection URL."""
    auth = GcpSession(module, 'bigquery')
    return return_if_object(module, auth.post(link, resource_to_request(module)), kind)


def update(module, link, kind):
    """PUT the full desired resource body to the table's self link."""
    auth = GcpSession(module, 'bigquery')
    return return_if_object(module, auth.put(link, resource_to_request(module)), kind)


def delete(module, link, kind):
    """DELETE the table at its self link."""
    auth = GcpSession(module, 'bigquery')
    return return_if_object(module, auth.delete(link), kind)


def resource_to_request(module):
    """Build the camelCase API request body from the module's snake_case params."""
    request = {
        u'kind': 'bigquery#table',
        u'tableReference': TableTablereference(module.params.get('table_reference', {}), module).to_request(),
        u'clustering': module.params.get('clustering'),
        u'description': module.params.get('description'),
        u'friendlyName': module.params.get('friendly_name'),
        u'labels': module.params.get('labels'),
        u'name': module.params.get('name'),
        u'numRows': module.params.get('num_rows'),
        u'view': TableView(module.params.get('view', {}), module).to_request(),
        u'timePartitioning': TableTimepartitioning(module.params.get('time_partitioning', {}), module).to_request(),
        u'schema': TableSchema(module.params.get('schema', {}), module).to_request(),
        u'encryptionConfiguration': TableEncryptionconfiguration(module.params.get('encryption_configuration', {}), module).to_request(),
        u'expirationTime': module.params.get('expiration_time'),
        u'externalDataConfiguration': TableExternaldataconfiguration(module.params.get('external_data_configuration', {}), module).to_request(),
    }
    # Drop unset/empty values, but keep explicit booleans (False must survive).
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource; returns None on 404 when allow_not_found is True."""
    auth = GcpSession(module, 'bigquery')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    """URL of this specific table resource."""
    return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables/{name}".format(**module.params)


def collection(module):
    """URL of the dataset's tables collection (used for create)."""
    return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response to a dict.

    Returns None for an allowed 404 or an empty 204; fails the module on
    HTTP errors, invalid JSON, or an API-level error payload.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    # json.decoder.JSONDecodeError may not exist on old Pythons; fall back to ValueError.
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True when desired and remote state differ.

    Only keys present on BOTH sides are compared, so output-only response
    fields and unset module parameters never cause a spurious diff.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Project the API response onto the comparable field set.

    Produces a dict shaped like resource_to_request() output so that
    is_different() can compare the two; nested objects are normalized
    through the mapper classes below.
    """
    return {
        u'tableReference': TableTablereference(response.get(u'tableReference', {}), module).from_response(),
        u'clustering': response.get(u'clustering'),
        u'creationTime': response.get(u'creationTime'),
        u'description': response.get(u'description'),
        u'friendlyName': response.get(u'friendlyName'),
        u'id': response.get(u'id'),
        u'labels': response.get(u'labels'),
        u'lastModifiedTime': response.get(u'lastModifiedTime'),
        u'location': response.get(u'location'),
        u'name': response.get(u'name'),
        u'numBytes': response.get(u'numBytes'),
        u'numLongTermBytes': response.get(u'numLongTermBytes'),
        u'numRows': response.get(u'numRows'),
        u'requirePartitionFilter': response.get(u'requirePartitionFilter'),
        u'type': response.get(u'type'),
        u'view': TableView(response.get(u'view', {}), module).from_response(),
        u'timePartitioning': TableTimepartitioning(response.get(u'timePartitioning', {}), module).from_response(),
        u'streamingBuffer': TableStreamingbuffer(response.get(u'streamingBuffer', {}), module).from_response(),
        u'schema': TableSchema(response.get(u'schema', {}), module).from_response(),
        u'encryptionConfiguration': TableEncryptionconfiguration(response.get(u'encryptionConfiguration', {}), module).from_response(),
        u'expirationTime': response.get(u'expirationTime'),
        u'externalDataConfiguration': TableExternaldataconfiguration(response.get(u'externalDataConfiguration', {}), module).from_response(),
    }


# Each mapper class below translates one nested object between the module's
# snake_case parameters (to_request) and the API's camelCase JSON
# (from_response).  NOTE: the generated original defined TableSchema and
# TableFieldsArray twice with byte-identical bodies; the redundant second
# definitions have been removed — behavior is unchanged.


class TableTablereference(object):
    """Maps the tableReference object (datasetId / projectId / tableId)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {u'datasetId': self.request.get('dataset_id'), u'projectId': self.request.get('project_id'), u'tableId': self.request.get('table_id')}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {u'datasetId': self.request.get(u'datasetId'), u'projectId': self.request.get(u'projectId'), u'tableId': self.request.get(u'tableId')}
        )


class TableView(object):
    """Maps the view object, including its user-defined function resources."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'useLegacySql': self.request.get('use_legacy_sql'),
                u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray(
                    self.request.get('user_defined_function_resources', []), self.module
                ).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'useLegacySql': self.request.get(u'useLegacySql'),
                u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray(
                    self.request.get(u'userDefinedFunctionResources', []), self.module
                ).from_response(),
            }
        )


class TableUserdefinedfunctionresourcesArray(object):
    """Maps the list of UDF resources (inlineCode / resourceUri items)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'inlineCode': item.get('inline_code'), u'resourceUri': item.get('resource_uri')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'inlineCode': item.get(u'inlineCode'), u'resourceUri': item.get(u'resourceUri')})


class TableTimepartitioning(object):
    """Maps the timePartitioning object (expirationMs / field / type)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {u'expirationMs': self.request.get('expiration_ms'), u'field': self.request.get('field'), u'type': self.request.get('type')}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {u'expirationMs': self.request.get(u'expirationMs'), u'field': self.request.get(u'field'), u'type': self.request.get(u'type')}
        )


class TableStreamingbuffer(object):
    """Output-only streamingBuffer object: nothing is user-settable, so both
    directions intentionally map to an empty dict."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({})

    def from_response(self):
        return remove_nones_from_dict({})


class TableSchema(object):
    """Maps a schema object (a wrapper around a list of fields)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get('fields', []), self.module).to_request()})

    def from_response(self):
        return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get(u'fields', []), self.module).from_response()})


class TableFieldsArray(object):
    """Maps the list of schema field definitions."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict(
            {
                u'description': item.get('description'),
                u'fields': item.get('fields'),
                u'mode': item.get('mode'),
                u'name': item.get('name'),
                u'type': item.get('type'),
            }
        )

    def _response_from_item(self, item):
        return remove_nones_from_dict(
            {
                u'description': item.get(u'description'),
                u'fields': item.get(u'fields'),
                u'mode': item.get(u'mode'),
                u'name': item.get(u'name'),
                u'type': item.get(u'type'),
            }
        )


class TableEncryptionconfiguration(object):
    """Maps the encryptionConfiguration object (kmsKeyName)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'kmsKeyName': self.request.get('kms_key_name')})

    def from_response(self):
        return remove_nones_from_dict({u'kmsKeyName': self.request.get(u'kmsKeyName')})


class TableExternaldataconfiguration(object):
    """Maps the externalDataConfiguration object and its nested options."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'autodetect': self.request.get('autodetect'),
                u'compression': self.request.get('compression'),
                u'ignoreUnknownValues': self.request.get('ignore_unknown_values'),
                u'maxBadRecords': self.request.get('max_bad_records'),
                u'sourceFormat': self.request.get('source_format'),
                u'sourceUris': self.request.get('source_uris'),
                u'schema': TableSchema(self.request.get('schema', {}), self.module).to_request(),
                u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get('google_sheets_options', {}), self.module).to_request(),
                u'csvOptions': TableCsvoptions(self.request.get('csv_options', {}), self.module).to_request(),
                u'bigtableOptions': TableBigtableoptions(self.request.get('bigtable_options', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'autodetect': self.request.get(u'autodetect'),
                u'compression': self.request.get(u'compression'),
                u'ignoreUnknownValues': self.request.get(u'ignoreUnknownValues'),
                u'maxBadRecords': self.request.get(u'maxBadRecords'),
                u'sourceFormat': self.request.get(u'sourceFormat'),
                u'sourceUris': self.request.get(u'sourceUris'),
                u'schema': TableSchema(self.request.get(u'schema', {}), self.module).from_response(),
                u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get(u'googleSheetsOptions', {}), self.module).from_response(),
                u'csvOptions': TableCsvoptions(self.request.get(u'csvOptions', {}), self.module).from_response(),
                u'bigtableOptions': TableBigtableoptions(self.request.get(u'bigtableOptions', {}), self.module).from_response(),
            }
        )


class TableGooglesheetsoptions(object):
    """Maps the googleSheetsOptions object (skipLeadingRows)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'skipLeadingRows': self.request.get('skip_leading_rows')})

    def from_response(self):
        return remove_nones_from_dict({u'skipLeadingRows': self.request.get(u'skipLeadingRows')})


class TableCsvoptions(object):
    """Maps the csvOptions object."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'allowJaggedRows': self.request.get('allow_jagged_rows'),
                u'allowQuotedNewlines': self.request.get('allow_quoted_newlines'),
                u'encoding': self.request.get('encoding'),
                u'fieldDelimiter': self.request.get('field_delimiter'),
                u'quote': self.request.get('quote'),
                u'skipLeadingRows': self.request.get('skip_leading_rows'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'allowJaggedRows': self.request.get(u'allowJaggedRows'),
                u'allowQuotedNewlines': self.request.get(u'allowQuotedNewlines'),
                u'encoding': self.request.get(u'encoding'),
                u'fieldDelimiter': self.request.get(u'fieldDelimiter'),
                u'quote': self.request.get(u'quote'),
                u'skipLeadingRows': self.request.get(u'skipLeadingRows'),
            }
        )


class TableBigtableoptions(object):
    """Maps the bigtableOptions object and its column families."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'ignoreUnspecifiedColumnFamilies': self.request.get('ignore_unspecified_column_families'),
                u'readRowkeyAsString': self.request.get('read_rowkey_as_string'),
                u'columnFamilies': TableColumnfamiliesArray(self.request.get('column_families', []), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'ignoreUnspecifiedColumnFamilies': self.request.get(u'ignoreUnspecifiedColumnFamilies'),
                u'readRowkeyAsString': self.request.get(u'readRowkeyAsString'),
                u'columnFamilies': TableColumnfamiliesArray(self.request.get(u'columnFamilies', []), self.module).from_response(),
            }
        )


class TableColumnfamiliesArray(object):
    """Maps the list of Bigtable column families."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict(
            {
                u'columns': TableColumnsArray(item.get('columns', []), self.module).to_request(),
                u'encoding': item.get('encoding'),
                u'familyId': item.get('family_id'),
                u'onlyReadLatest': item.get('only_read_latest'),
                u'type': item.get('type'),
            }
        )

    def _response_from_item(self, item):
        return remove_nones_from_dict(
            {
                u'columns': TableColumnsArray(item.get(u'columns', []), self.module).from_response(),
                u'encoding': item.get(u'encoding'),
                u'familyId': item.get(u'familyId'),
                u'onlyReadLatest': item.get(u'onlyReadLatest'),
                u'type': item.get(u'type'),
            }
        )


class TableColumnsArray(object):
    """Maps the list of exposed Bigtable columns within a column family."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict(
            {
                u'encoding': item.get('encoding'),
                u'fieldName': item.get('field_name'),
                u'onlyReadLatest': item.get('only_read_latest'),
                u'qualifierString': item.get('qualifier_string'),
                u'type': item.get('type'),
            }
        )

    def _response_from_item(self, item):
        return remove_nones_from_dict(
            {
                u'encoding': item.get(u'encoding'),
                u'fieldName': item.get(u'fieldName'),
                u'onlyReadLatest': item.get(u'onlyReadLatest'),
                u'qualifierString': item.get(u'qualifierString'),
                u'type': item.get(u'type'),
            }
        )


if __name__ == '__main__':
    main()
b/ansible_collections/google/cloud/plugins/modules/gcp_bigquery_table_info.py new file mode 100644 index 000000000..99b89acb1 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_bigquery_table_info.py @@ -0,0 +1,626 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_bigquery_table_info +description: +- Gather info for GCP Table +short_description: Gather info for GCP Table +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + dataset: + description: + - Name of the dataset. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a table + gcp_bigquery_table_info: + dataset: example_dataset + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + tableReference: + description: + - Reference describing the ID of this table. 
+ returned: success + type: complex + contains: + datasetId: + description: + - The ID of the dataset containing this table. + returned: success + type: str + projectId: + description: + - The ID of the project containing this table. + returned: success + type: str + tableId: + description: + - The ID of the table. + returned: success + type: str + clustering: + description: + - One or more fields on which data should be clustered. Only top-level, non-repeated, + simple-type fields are supported. When you cluster a table using multiple + columns, the order of columns you specify is important. The order of the specified + columns determines the sort order of the data. + returned: success + type: list + creationTime: + description: + - The time when this dataset was created, in milliseconds since the epoch. + returned: success + type: int + description: + description: + - A user-friendly description of the dataset. + returned: success + type: str + friendlyName: + description: + - A descriptive name for this table. + returned: success + type: str + id: + description: + - An opaque ID uniquely identifying the table. + returned: success + type: str + labels: + description: + - The labels associated with this dataset. You can use these to organize and + group your datasets. + returned: success + type: dict + lastModifiedTime: + description: + - The time when this table was last modified, in milliseconds since the epoch. + returned: success + type: int + location: + description: + - The geographic location where the table resides. This value is inherited from + the dataset. + returned: success + type: str + name: + description: + - Name of the table. + returned: success + type: str + numBytes: + description: + - The size of this table in bytes, excluding any data in the streaming buffer. + returned: success + type: int + numLongTermBytes: + description: + - The number of bytes in the table that are considered "long-term storage".
+ returned: success + type: int + numRows: + description: + - The number of rows of data in this table, excluding any data in the streaming + buffer. + returned: success + type: int + requirePartitionFilter: + description: + - If set to true, queries over this table require a partition filter that can + be used for partition elimination to be specified. + returned: success + type: bool + type: + description: + - Describes the table type. + returned: success + type: str + view: + description: + - The view definition. + returned: success + type: complex + contains: + useLegacySql: + description: + - Specifies whether to use BigQuery's legacy SQL for this view . + returned: success + type: bool + userDefinedFunctionResources: + description: + - Describes user-defined function resources used in the query. + returned: success + type: complex + contains: + inlineCode: + description: + - An inline resource that contains code for a user-defined function + (UDF). Providing a inline code resource is equivalent to providing + a URI for a file containing the same code. + returned: success + type: str + resourceUri: + description: + - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). + returned: success + type: str + timePartitioning: + description: + - If specified, configures time-based partitioning for this table. + returned: success + type: complex + contains: + expirationMs: + description: + - Number of milliseconds for which to keep the storage for a partition. + returned: success + type: int + field: + description: + - If not set, the table is partitioned by pseudo column, referenced via + either '_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE + type. If field is specified, the table is instead partitioned by this + field. The field must be a top-level TIMESTAMP or DATE field. Its mode + must be NULLABLE or REQUIRED. 
+ returned: success + type: str + type: + description: + - The only type supported is DAY, which will generate one partition per + day. + returned: success + type: str + streamingBuffer: + description: + - Contains information regarding this table's streaming buffer, if one is present. + This field will be absent if the table is not being streamed to or if there + is no data in the streaming buffer. + returned: success + type: complex + contains: + estimatedBytes: + description: + - A lower-bound estimate of the number of bytes currently in the streaming + buffer. + returned: success + type: int + estimatedRows: + description: + - A lower-bound estimate of the number of rows currently in the streaming + buffer. + returned: success + type: int + oldestEntryTime: + description: + - Contains the timestamp of the oldest entry in the streaming buffer, in + milliseconds since the epoch, if the streaming buffer is available. + returned: success + type: int + schema: + description: + - Describes the schema of this table. + returned: success + type: complex + contains: + fields: + description: + - Describes the fields in a table. + returned: success + type: complex + contains: + description: + description: + - The field description. The maximum length is 1,024 characters. + returned: success + type: str + fields: + description: + - Describes the nested schema fields if the type property is set to + RECORD. + returned: success + type: list + mode: + description: + - The field mode. + returned: success + type: str + name: + description: + - The field name. + returned: success + type: str + type: + description: + - The field data type. + returned: success + type: str + encryptionConfiguration: + description: + - Custom encryption configuration. + returned: success + type: complex + contains: + kmsKeyName: + description: + - Describes the Cloud KMS encryption key that will be used to protect destination + BigQuery table. 
The BigQuery Service Account associated with your project + requires access to this encryption key. + returned: success + type: str + expirationTime: + description: + - The time when this table expires, in milliseconds since the epoch. If not + present, the table will persist indefinitely. + returned: success + type: int + externalDataConfiguration: + description: + - Describes the data format, location, and other properties of a table stored + outside of BigQuery. By defining these properties, the data source can then + be queried as if it were a standard BigQuery table. + returned: success + type: complex + contains: + autodetect: + description: + - Try to detect schema and format options automatically. Any option specified + explicitly will be honored. + returned: success + type: bool + compression: + description: + - The compression type of the data source. + returned: success + type: str + ignoreUnknownValues: + description: + - Indicates if BigQuery should allow extra values that are not represented + in the table schema . + returned: success + type: bool + maxBadRecords: + description: + - The maximum number of bad records that BigQuery can ignore when reading + data . + returned: success + type: int + sourceFormat: + description: + - The data format. + returned: success + type: str + sourceUris: + description: + - The fully-qualified URIs that point to your data in Google Cloud. + - 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard + character and it must come after the ''bucket'' name. Size limits related + to load jobs apply to external data sources. For Google Cloud Bigtable + URIs: Exactly one URI can be specified and it has be a fully specified + and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud + Datastore backups, exactly one URI can be specified. Also, the ''*'' wildcard + character is not allowed.' + returned: success + type: list + schema: + description: + - The schema for the data. 
Schema is required for CSV and JSON formats. + returned: success + type: complex + contains: + fields: + description: + - Describes the fields in a table. + returned: success + type: complex + contains: + description: + description: + - The field description. + returned: success + type: str + fields: + description: + - Describes the nested schema fields if the type property is set + to RECORD . + returned: success + type: list + mode: + description: + - Field mode. + returned: success + type: str + name: + description: + - Field name. + returned: success + type: str + type: + description: + - Field data type. + returned: success + type: str + googleSheetsOptions: + description: + - Additional options if sourceFormat is set to GOOGLE_SHEETS. + returned: success + type: complex + contains: + skipLeadingRows: + description: + - The number of rows at the top of a Google Sheet that BigQuery will + skip when reading the data. + returned: success + type: int + csvOptions: + description: + - Additional properties to set if sourceFormat is set to CSV. + returned: success + type: complex + contains: + allowJaggedRows: + description: + - Indicates if BigQuery should accept rows that are missing trailing + optional columns . + returned: success + type: bool + allowQuotedNewlines: + description: + - Indicates if BigQuery should allow quoted data sections that contain + newline characters in a CSV file . + returned: success + type: bool + encoding: + description: + - The character encoding of the data. + returned: success + type: str + fieldDelimiter: + description: + - The separator for fields in a CSV file. + returned: success + type: str + quote: + description: + - The value that is used to quote data sections in a CSV file. + returned: success + type: str + skipLeadingRows: + description: + - The number of rows at the top of a CSV file that BigQuery will skip + when reading the data. 
+ returned: success + type: int + bigtableOptions: + description: + - Additional options if sourceFormat is set to BIGTABLE. + returned: success + type: complex + contains: + ignoreUnspecifiedColumnFamilies: + description: + - If field is true, then the column families that are not specified + in columnFamilies list are not exposed in the table schema . + returned: success + type: bool + readRowkeyAsString: + description: + - If field is true, then the rowkey column families will be read and + converted to string. + returned: success + type: bool + columnFamilies: + description: + - List of column families to expose in the table schema along with their + types. + returned: success + type: complex + contains: + columns: + description: + - Lists of columns that should be exposed as individual fields as + opposed to a list of (column name, value) pairs. + returned: success + type: complex + contains: + encoding: + description: + - The encoding of the values when the type is not STRING. + returned: success + type: str + fieldName: + description: + - If the qualifier is not a valid BigQuery field identifier, + a valid identifier must be provided as the column field name + and is used as field name in queries. + returned: success + type: str + onlyReadLatest: + description: + - If this is set, only the latest version of value in this column + are exposed . + returned: success + type: bool + qualifierString: + description: + - Qualifier of the column. + returned: success + type: str + type: + description: + - The type to convert the value in cells of this column. + returned: success + type: str + encoding: + description: + - The encoding of the values when the type is not STRING. + returned: success + type: str + familyId: + description: + - Identifier of the column family. + returned: success + type: str + onlyReadLatest: + description: + - If this is set only the latest version of value are exposed for + all columns in this column family . 
+ returned: success + type: bool + type: + description: + - The type to convert the value in cells of this column family. + returned: success + type: str + dataset: + description: + - Name of the dataset. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(dataset=dict(type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'bigquery') + return auth.list(link, return_if_object, array_name='tables') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_bigtable_instance.py b/ansible_collections/google/cloud/plugins/modules/gcp_bigtable_instance.py new file mode 100644 index 000000000..b70b00456 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_bigtable_instance.py @@ -0,0 +1,535 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_bigtable_instance +description: +- A collection of Bigtable Tables and the resources that serve them. 
All tables in + an instance are served from all Clusters in the instance. +short_description: Creates a GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The unique name of the instance. + required: false + type: str + display_name: + description: + - The descriptive name for this instance as it appears in UIs. + - Can be changed at any time, but should be kept globally unique to avoid confusion. + required: false + type: str + type: + description: + - The type of the instance. Defaults to `PRODUCTION`. + - 'Some valid choices include: "TYPE_UNSPECIFIED", "PRODUCTION", "DEVELOPMENT"' + required: false + type: str + labels: + description: + - Labels are a flexible and lightweight mechanism for organizing cloud resources + into groups that reflect a customer's organizational needs and deployment strategies. + They can be used to filter resources and aggregate metrics. + required: false + type: dict + clusters: + description: + - An array of clusters. Maximum 4. + elements: dict + required: false + type: list + suboptions: + name: + description: + - The unique name of the cluster. + required: false + type: str + serve_nodes: + description: + - The number of nodes allocated to this cluster. More nodes enable higher + throughput and more consistent performance. + required: false + type: int + default_storage_type: + description: + - The type of storage used by this cluster to serve its parent instance's + tables, unless explicitly overridden. + - 'Some valid choices include: "STORAGE_TYPE_UNSPECIFIED", "SSD", "HDD"' + required: false + type: str + location: + description: + - The location where this cluster's nodes and storage reside. 
For best performance, + clients should be located as close as possible to this cluster. Currently + only zones are supported, so values should be of the form `projects//locations/`. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a instance + google.cloud.gcp_bigtable_instance: + name: my-instance + display_name: My Test Cluster + clusters: + - name: mycluster + location: projects/test_project/locations/us-central1-a + serve_nodes: 1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +state: + description: + - The current state of the instance. + returned: success + type: str +name: + description: + - The unique name of the instance. + returned: success + type: str +displayName: + description: + - The descriptive name for this instance as it appears in UIs. 
+ - Can be changed at any time, but should be kept globally unique to avoid confusion. + returned: success + type: str +type: + description: + - The type of the instance. Defaults to `PRODUCTION`. + returned: success + type: str +labels: + description: + - Labels are a flexible and lightweight mechanism for organizing cloud resources + into groups that reflect a customer's organizational needs and deployment strategies. + They can be used to filter resources and aggregate metrics. + returned: success + type: dict +clusters: + description: + - An array of clusters. Maximum 4. + returned: success + type: complex + contains: + name: + description: + - The unique name of the cluster. + returned: success + type: str + serveNodes: + description: + - The number of nodes allocated to this cluster. More nodes enable higher throughput + and more consistent performance. + returned: success + type: int + defaultStorageType: + description: + - The type of storage used by this cluster to serve its parent instance's tables, + unless explicitly overridden. + returned: success + type: str + location: + description: + - The location where this cluster's nodes and storage reside. For best performance, + clients should be located as close as possible to this cluster. Currently + only zones are supported, so values should be of the form `projects//locations/`. + returned: success + type: str + state: + description: + - The current state of the cluster. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(type='str'), + display_name=dict(type='str'), + type=dict(type='str'), + labels=dict(type='dict'), + clusters=dict( + type='list', + elements='dict', + options=dict(name=dict(type='str'), serve_nodes=dict(type='int'), default_storage_type=dict(type='str'), location=dict(type='str')), + ), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module)) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'bigtable') + return wait_for_operation(module, auth.post(link, resource_to_create(module))) + + +def update(module, link): + auth = GcpSession(module, 'bigtable') + return return_if_object(module, 
auth.put(link, resource_to_request(module))) + + +def delete(module, link): + auth = GcpSession(module, 'bigtable') + return return_if_object(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'name': module.params.get('name'), + u'displayName': module.params.get('display_name'), + u'type': module.params.get('type'), + u'labels': module.params.get('labels'), + u'clusters': InstanceClustersArray(module.params.get('clusters', []), module).to_request(), + } + request = encode_request(request, module) + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'bigtable') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://bigtableadmin.googleapis.com/v2/projects/{project}/instances/{name}".format(**module.params) + + +def collection(module): + return "https://bigtableadmin.googleapis.com/v2/projects/{project}/instances".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + result = decode_response(result, module) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + request = decode_response(request, module) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'state': response.get(u'state'), + u'name': response.get(u'name'), + u'displayName': response.get(u'displayName'), + u'type': response.get(u'type'), + u'labels': response.get(u'labels'), + u'clusters': InstanceClustersArray(response.get(u'clusters', []), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://bigtableadmin.googleapis.com/v2/operations/{module.params['clusters'][0]['location']}/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response) + if op_result is None: + return {} + status = navigate_hash(op_result, ['done']) + wait_done = wait_for_completion(status, op_result, module) + raise_if_errors(wait_done, ['error'], module) + return navigate_hash(wait_done, ['response']) + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = bigtable_async_url(module, {'op_id': op_id}) + while not status: + raise_if_errors(op_result, ['error'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, False) + status = navigate_hash(op_result, ['done']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +def resource_to_create(module): + instance = resource_to_request(module) + if 'name' in instance: + del instance['name'] + + 
clusters = [] + if 'clusters' in instance: + clusters = instance['clusters'] + del instance['clusters'] + + return {'instanceId': module.params['name'].split('/')[-1], 'instance': instance, 'clusters': clusters} + + +def encode_request(request, module): + if 'name' in request: + del request['name'] + + if 'clusters' in request: + request['clusters'] = convert_clusters_to_map(request['clusters']) + return request + + +def decode_response(response, module): + if 'name' in response: + response['name'] = response['name'].split('/')[-1] + + if 'clusters' in response: + response['clusters'] = convert_map_to_clusters(response['clusters']) + return response + + +def convert_clusters_to_map(clusters): + cmap = {} + for cluster in clusters: + cmap[cluster['name']] = cluster + del cmap[cluster['name']]['name'] + return cmap + + +def convert_map_to_clusters(clusters): + carray = [] + for key, cluster in clusters.items(): + cluster['name'] = key.split('/')[-1] + carray.append(cluster) + return carray + + +def bigtable_async_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + location_name = module.params['clusters'][0]['location'].split('/')[-1] + + url = 'https://bigtableadmin.googleapis.com/v2/operations/projects/%s' '/instances/%s/locations/%s/operations/{op_id}' % ( + module.params['project'], + module.params['name'], + location_name, + ) + + return url.format(**extra_data) + + +class InstanceClustersArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'name': item.get('name'), + u'serveNodes': item.get('serve_nodes'), + 
u'defaultStorageType': item.get('default_storage_type'), + u'location': item.get('location'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'name': item.get(u'name'), + u'serveNodes': item.get(u'serveNodes'), + u'defaultStorageType': item.get(u'defaultStorageType'), + u'location': item.get(u'location'), + } + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_bigtable_instance_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_bigtable_instance_info.py new file mode 100644 index 000000000..deefad028 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_bigtable_instance_info.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_bigtable_instance_info +description: +- Gather info for GCP Instance +short_description: Gather info for GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance + gcp_bigtable_instance_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + state: + description: + - The current state of the instance. + returned: success + type: str + name: + description: + - The unique name of the instance. + returned: success + type: str + displayName: + description: + - The descriptive name for this instance as it appears in UIs. + - Can be changed at any time, but should be kept globally unique to avoid confusion. + returned: success + type: str + type: + description: + - The type of the instance. Defaults to `PRODUCTION`. + returned: success + type: str + labels: + description: + - Labels are a flexible and lightweight mechanism for organizing cloud resources + into groups that reflect a customer's organizational needs and deployment + strategies. They can be used to filter resources and aggregate metrics. + returned: success + type: dict + clusters: + description: + - An array of clusters. Maximum 4. + returned: success + type: complex + contains: + name: + description: + - The unique name of the cluster. 
+ returned: success + type: str + serveNodes: + description: + - The number of nodes allocated to this cluster. More nodes enable higher + throughput and more consistent performance. + returned: success + type: int + defaultStorageType: + description: + - The type of storage used by this cluster to serve its parent instance's + tables, unless explicitly overridden. + returned: success + type: str + location: + description: + - The location where this cluster's nodes and storage reside. For best performance, + clients should be located as close as possible to this cluster. Currently + only zones are supported, so values should be of the form `projects//locations/`. + returned: success + type: str + state: + description: + - The current state of the cluster. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://bigtableadmin.googleapis.com/v2/projects/{project}/instances".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'bigtable') + return auth.list(link, return_if_object, array_name='instances') + + +def return_if_object(module, response): + # If not found, return nothing. 
+ if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_cloudbuild_trigger.py b/ansible_collections/google/cloud/plugins/modules/gcp_cloudbuild_trigger.py new file mode 100644 index 000000000..da506a0f0 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_cloudbuild_trigger.py @@ -0,0 +1,2246 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_cloudbuild_trigger +description: +- Configuration for an automated build in response to source repository changes. +short_description: Creates a GCP Trigger +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + id: + description: + - The unique identifier for the trigger. + required: false + type: str + name: + description: + - Name of the trigger. Must be unique within the project. + required: false + type: str + description: + description: + - Human-readable description of the trigger. + required: false + type: str + tags: + description: + - Tags for annotation of a BuildTrigger . + elements: str + required: false + type: list + disabled: + description: + - Whether the trigger is disabled or not. If true, the trigger will never result + in a build. + required: false + type: bool + substitutions: + description: + - Substitutions data for Build resource. + required: false + type: dict + filename: + description: + - Path, from the source root, to a file whose contents is used for the template. + Either a filename or build template must be provided. 
+ required: false + type: str + ignored_files: + description: + - ignoredFiles and includedFiles are file glob matches using U(https://golang.org/pkg/path/filepath/#Match) + extended with support for `**`. + - If ignoredFiles and changed files are both empty, then they are not used to + determine whether or not to trigger a build. + - If ignoredFiles is not empty, then we ignore any files that match any of the + ignored_file globs. If the change has no files that are outside of the ignoredFiles + globs, then we do not trigger a build. + elements: str + required: false + type: list + included_files: + description: + - ignoredFiles and includedFiles are file glob matches using U(https://golang.org/pkg/path/filepath/#Match) + extended with support for `**`. + - If any of the files altered in the commit pass the ignoredFiles filter and includedFiles + is empty, then as far as this filter is concerned, we should trigger the build. + - If any of the files altered in the commit pass the ignoredFiles filter and includedFiles + is not empty, then we make sure that at least one of those files matches a includedFiles + glob. If not, then we do not trigger a build. + elements: str + required: false + type: list + trigger_template: + description: + - Template describing the types of source changes to trigger a build. + - Branch and tag names in trigger templates are interpreted as regular expressions. + Any branch or tag change that matches that regular expression will trigger a + build. + required: false + type: dict + suboptions: + project_id: + description: + - ID of the project that owns the Cloud Source Repository. If omitted, the + project ID requesting the build is assumed. + required: false + type: str + repo_name: + description: + - Name of the Cloud Source Repository. If omitted, the name "default" is assumed. + required: false + default: default + type: str + dir: + description: + - Directory, relative to the source root, in which to run the build. 
+ - This must be a relative path. If a step's dir is specified and is an absolute + path, this value is ignored for that step's execution. + required: false + type: str + invert_regex: + description: + - Only trigger a build if the revision regex does NOT match the revision regex. + required: false + type: bool + branch_name: + description: + - Name of the branch to build. Exactly one a of branch name, tag, or commit + SHA must be provided. + - This field is a regular expression. + required: false + type: str + tag_name: + description: + - Name of the tag to build. Exactly one of a branch name, tag, or commit SHA + must be provided. + - This field is a regular expression. + required: false + type: str + commit_sha: + description: + - Explicit commit SHA to build. Exactly one of a branch name, tag, or commit + SHA must be provided. + required: false + type: str + github: + description: + - Describes the configuration of a trigger that creates a build whenever a GitHub + event is received. + required: false + type: dict + suboptions: + owner: + description: + - 'Owner of the repository. For example: The owner for U(https://github.com/googlecloudplatform/cloud-builders) + is "googlecloudplatform".' + required: false + type: str + name: + description: + - 'Name of the repository. For example: The name for U(https://github.com/googlecloudplatform/cloud-builders) + is "cloud-builders".' + required: false + type: str + pull_request: + description: + - filter to match changes in pull requests. Specify only one of pullRequest + or push. + required: false + type: dict + suboptions: + branch: + description: + - Regex of branches to match. + required: true + type: str + comment_control: + description: + - Whether to block builds on a "/gcbrun" comment from a repository owner + or collaborator. 
+ - 'Some valid choices include: "COMMENTS_DISABLED", "COMMENTS_ENABLED", + "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"' + required: false + type: str + invert_regex: + description: + - If true, branches that do NOT match the git_ref will trigger a build. + required: false + type: bool + push: + description: + - filter to match changes in refs, like branches or tags. Specify only one + of pullRequest or push. + required: false + type: dict + suboptions: + invert_regex: + description: + - When true, only trigger a build if the revision regex does NOT match + the git_ref regex. + required: false + type: bool + branch: + description: + - Regex of branches to match. Specify only one of branch or tag. + required: false + type: str + tag: + description: + - Regex of tags to match. Specify only one of branch or tag. + required: false + type: str + pubsub_config: + description: + - PubsubConfig describes the configuration of a trigger that creates a build whenever + a Pub/Sub message is published. + required: false + type: dict + suboptions: + topic: + description: + - The name of the topic from which this subscription is receiving messages. + required: true + type: str + service_account_email: + description: + - Service account that will make the push request. + required: false + type: str + webhook_config: + description: + - WebhookConfig describes the configuration of a trigger that creates a build + whenever a webhook is sent to a trigger's webhook URL. + required: false + type: dict + suboptions: + secret: + description: + - Resource name for the secret required as a URL parameter. + required: true + type: str + build: + description: + - Contents of the build template. Either a filename or build template must be + provided. + required: false + type: dict + suboptions: + source: + description: + - The location of the source files to build. 
+ required: false + type: dict + suboptions: + storage_source: + description: + - Location of the source in an archive file in Google Cloud Storage. + required: false + type: dict + suboptions: + bucket: + description: + - Google Cloud Storage bucket containing the source. + required: true + type: str + object: + description: + - Google Cloud Storage object containing the source. + - This object must be a gzipped archive file (.tar.gz) containing + source to build. + required: true + type: str + generation: + description: + - Google Cloud Storage generation for the object. If the generation + is omitted, the latest generation will be used . + required: false + type: str + repo_source: + description: + - Location of the source in a Google Cloud Source Repository. + required: false + type: dict + suboptions: + project_id: + description: + - ID of the project that owns the Cloud Source Repository. If omitted, + the project ID requesting the build is assumed. + required: false + type: str + repo_name: + description: + - Name of the Cloud Source Repository. + required: true + type: str + dir: + description: + - Directory, relative to the source root, in which to run the build. + - This must be a relative path. If a step's dir is specified and is + an absolute path, this value is ignored for that step's execution. + required: false + type: str + invert_regex: + description: + - Only trigger a build if the revision regex does NOT match the revision + regex. + required: false + type: bool + substitutions: + description: + - Substitutions to use in a triggered build. Should only be used with + triggers.run . + required: false + type: dict + branch_name: + description: + - Regex matching branches to build. Exactly one a of branch name, + tag, or commit SHA must be provided. + - The syntax of the regular expressions accepted is the syntax accepted + by RE2 and described at U(https://github.com/google/re2/wiki/Syntax) + . 
+ required: false + type: str + tag_name: + description: + - Regex matching tags to build. Exactly one a of branch name, tag, + or commit SHA must be provided. + - The syntax of the regular expressions accepted is the syntax accepted + by RE2 and described at U(https://github.com/google/re2/wiki/Syntax) + . + required: false + type: str + commit_sha: + description: + - Explicit commit SHA to build. Exactly one a of branch name, tag, + or commit SHA must be provided. + required: false + type: str + tags: + description: + - Tags for annotation of a Build. These are not docker tags. + elements: str + required: false + type: list + images: + description: + - A list of images to be pushed upon the successful completion of all build + steps. + - The images are pushed using the builder service account's credentials. + - The digests of the pushed images will be stored in the Build resource's + results field. + - If any of the images fail to be pushed, the build status is marked FAILURE. + elements: str + required: false + type: list + substitutions: + description: + - Substitutions data for Build resource. + required: false + type: dict + queue_ttl: + description: + - TTL in queue for this build. If provided and the build is enqueued longer + than this value, the build will expire and the build status will be EXPIRED. + - The TTL starts ticking from createTime. + - 'A duration in seconds with up to nine fractional digits, terminated by + ''s''. Example: "3.5s".' + required: false + type: str + logs_bucket: + description: + - Google Cloud Storage bucket where logs should be written. Logs file names + will be of the format ${logsBucket}/log-${build_id}.txt. + required: false + type: str + timeout: + description: + - Amount of time that this build should be allowed to run, to second granularity. + - If this amount of time elapses, work on the build will cease and the build + status will be TIMEOUT. 
+ - This timeout must be equal to or greater than the sum of the timeouts for + build steps within the build. + - The expected format is the number of seconds followed by s. + - Default time is ten minutes (600s). + required: false + default: 600s + type: str + secrets: + description: + - Secrets to decrypt using Cloud Key Management Service. + elements: dict + required: false + type: list + suboptions: + kms_key_name: + description: + - Cloud KMS key name to use to decrypt these envs. + required: true + type: str + secret_env: + description: + - Map of environment variable name to its encrypted value. + - Secret environment variables must be unique across all of a build's + secrets, and must be used by at least one build step. Values can be + at most 64 KB in size. There can be at most 100 secret values across + all of a build's secrets. + required: false + type: dict + steps: + description: + - The operations to be performed on the workspace. + elements: dict + required: true + type: list + suboptions: + name: + description: + - The name of the container image that will run this particular build + step. + - If the image is available in the host's Docker daemon's cache, it will + be run directly. If not, the host will attempt to pull the image first, + using the builder service account's credentials if necessary. + - The Docker daemon's cache will already have the latest versions of all + of the officially supported build steps (see U(https://github.com/GoogleCloudPlatform/cloud-builders) + for images and examples). + - The Docker daemon will also have cached many of the layers for some + popular images, like "ubuntu", "debian", but they will be refreshed + at the time you attempt to use them. + - If you built an image in a previous build step, it will be stored in + the host's Docker daemon's cache and is available to use as the name + for a later build step. 
+ required: true + type: str + args: + description: + - A list of arguments that will be presented to the step when it is started. + - If the image used to run the step's container has an entrypoint, the + args are used as arguments to that entrypoint. If the image does not + define an entrypoint, the first element in args is used as the entrypoint, + and the remainder will be used as arguments. + elements: str + required: false + type: list + env: + description: + - A list of environment variable definitions to be used when running a + step. + - The elements are of the form "KEY=VALUE" for the environment variable + "KEY" being given the value "VALUE". + elements: str + required: false + type: list + id: + description: + - Unique identifier for this build step, used in `wait_for` to reference + this build step as a dependency. + required: false + type: str + entrypoint: + description: + - Entrypoint to be used instead of the build step image's default entrypoint. + - If unset, the image's default entrypoint is used . + required: false + type: str + dir: + description: + - Working directory to use when running this step's container. + - If this value is a relative path, it is relative to the build's working + directory. If this value is absolute, it may be outside the build's + working directory, in which case the contents of the path may not be + persisted across build step executions, unless a `volume` for that path + is specified. + - If the build specifies a `RepoSource` with `dir` and a step with a `dir`, + which specifies an absolute path, the `RepoSource` `dir` is ignored + for the step's execution. + required: false + type: str + secret_env: + description: + - A list of environment variables which are encrypted using a Cloud Key + Management Service crypto key. These values must be specified in the + build's `Secret`. + elements: str + required: false + type: list + timeout: + description: + - Time limit for executing this build step. 
If not defined, the step has + no time limit and will be allowed to continue to run until either it + completes or the build itself times out. + required: false + type: str + timing: + description: + - Output only. Stores timing information for executing this build step. + required: false + type: str + volumes: + description: + - List of volumes to mount into the build step. + - Each volume is created as an empty volume prior to execution of the + build step. Upon completion of the build, volumes and their contents + are discarded. + - Using a named volume in only one step is not valid as it is indicative + of a build request with an incorrect configuration. + elements: dict + required: false + type: list + suboptions: + name: + description: + - Name of the volume to mount. + - Volume names must be unique per build step and must be valid names + for Docker volumes. Each named volume must be used by at least two + build steps. + required: true + type: str + path: + description: + - Path at which to mount the volume. + - Paths must be absolute and cannot conflict with other volume paths + on the same build step or with certain reserved volume paths. + required: true + type: str + wait_for: + description: + - The ID(s) of the step(s) that this build step depends on. + - This build step will not start until all the build steps in `wait_for` + have completed successfully. If `wait_for` is empty, this build step + will start when all previous build steps in the `Build.Steps` list have + completed successfully. + elements: str + required: false + type: list + artifacts: + description: + - Artifacts produced by the build that should be uploaded upon successful + completion of all build steps. + required: false + type: dict + suboptions: + images: + description: + - A list of images to be pushed upon the successful completion of all + build steps. + - The images will be pushed using the builder service account's credentials. 
+ - The digests of the pushed images will be stored in the Build resource's + results field. + - If any of the images fail to be pushed, the build is marked FAILURE. + elements: str + required: false + type: list + objects: + description: + - A list of objects to be uploaded to Cloud Storage upon successful completion + of all build steps. + - Files in the workspace matching specified paths globs will be uploaded + to the Cloud Storage location using the builder service account's credentials. + - The location and generation of the uploaded objects will be stored in + the Build resource's results field. + - If any objects fail to be pushed, the build is marked FAILURE. + required: false + type: dict + suboptions: + location: + description: + - Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". + - Files in the workspace matching any path pattern will be uploaded + to Cloud Storage with this location as a prefix. + required: false + type: str + paths: + description: + - Path globs used to match files in the build's workspace. + elements: str + required: false + type: list + options: + description: + - Special options for this build. + required: false + type: dict + suboptions: + source_provenance_hash: + description: + - Requested hash for SourceProvenance. + elements: str + required: false + type: list + requested_verify_option: + description: + - Requested verifiability options. + - 'Some valid choices include: "NOT_VERIFIED", "VERIFIED"' + required: false + type: str + machine_type: + description: + - Compute Engine machine type on which to run the build. + - 'Some valid choices include: "UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", + "E2_HIGHCPU_8", "E2_HIGHCPU_32"' + required: false + type: str + disk_size_gb: + description: + - Requested disk size for the VM that runs the build. Note that this is + NOT "disk free"; some of the space will be used by the operating system + and build utilities. 
+ - Also note that this is the minimum disk size that will be allocated + for the build -- the build may run with a larger disk than requested. + At present, the maximum disk size is 1000GB; builds that request more + than the maximum are rejected with an error. + required: false + type: int + substitution_option: + description: + - Option to specify behavior when there is an error in the substitution + checks. + - NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot + be overridden in the build configuration file. + - 'Some valid choices include: "MUST_MATCH", "ALLOW_LOOSE"' + required: false + type: str + dynamic_substitutions: + description: + - Option to specify whether or not to apply bash style string operations + to the substitutions. + - NOTE this is always enabled for triggered builds and cannot be overridden + in the build configuration file. + required: false + type: bool + log_streaming_option: + description: + - Option to define build log streaming behavior to Google Cloud Storage. + - 'Some valid choices include: "STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF"' + required: false + type: str + worker_pool: + description: + - Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} + This field is experimental. + required: false + type: str + logging: + description: + - Option to specify the logging mode, which determines if and where build + logs are stored. + - 'Some valid choices include: "LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", + "STACKDRIVER_ONLY", "NONE"' + required: false + type: str + env: + description: + - A list of global environment variable definitions that will exist for + all build steps in this build. If a variable is defined in both globally + and in a build step, the variable will use the build step value. + - The elements are of the form "KEY=VALUE" for the environment variable + "KEY" being given the value "VALUE". 
+ elements: str + required: false + type: list + secret_env: + description: + - A list of global environment variables, which are encrypted using a + Cloud Key Management Service crypto key. These values must be specified + in the build's Secret. These variables will be available to all build + steps in this build. + elements: str + required: false + type: list + volumes: + description: + - Global list of volumes to mount for ALL build steps Each volume is created + as an empty volume prior to starting the build process. + - Upon completion of the build, volumes and their contents are discarded. + Global volume names and paths cannot conflict with the volumes defined + a build step. + - Using a global volume in a build with only one step is not valid as + it is indicative of a build request with an incorrect configuration. + elements: dict + required: false + type: list + suboptions: + name: + description: + - Name of the volume to mount. + - Volume names must be unique per build step and must be valid names + for Docker volumes. + - Each named volume must be used by at least two build steps. + required: false + type: str + path: + description: + - Path at which to mount the volume. + - Paths must be absolute and cannot conflict with other volume paths + on the same build step or with certain reserved volume paths. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.triggers)' +- 'Automating builds using build triggers: U(https://cloud.google.com/cloud-build/docs/running-builds/automate-builds)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +- The id for this resource is created by the API after you create the resource the + first time. If you want to manage this resource after creation, you'll have to copy + the generated id into the playbook. If you do not, new triggers will be created + on subsequent runs. 
+''' + +EXAMPLES = ''' +- name: create a repository + google.cloud.gcp_sourcerepo_repository: + name: projects/{{ gcp_project }}/repos/{{ resource_name }} + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + +- name: create a trigger + google.cloud.gcp_cloudbuild_trigger: + trigger_template: + branch_name: master + project_id: test_project + repo_name: test_object + filename: cloudbuild.yaml + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +id: + description: + - The unique identifier for the trigger. + returned: success + type: str +name: + description: + - Name of the trigger. Must be unique within the project. + returned: success + type: str +description: + description: + - Human-readable description of the trigger. + returned: success + type: str +tags: + description: + - Tags for annotation of a BuildTrigger . + returned: success + type: list +disabled: + description: + - Whether the trigger is disabled or not. If true, the trigger will never result + in a build. + returned: success + type: bool +createTime: + description: + - Time when the trigger was created. + returned: success + type: str +substitutions: + description: + - Substitutions data for Build resource. + returned: success + type: dict +filename: + description: + - Path, from the source root, to a file whose contents is used for the template. + Either a filename or build template must be provided. + returned: success + type: str +ignoredFiles: + description: + - ignoredFiles and includedFiles are file glob matches using U(https://golang.org/pkg/path/filepath/#Match) + extended with support for `**`. + - If ignoredFiles and changed files are both empty, then they are not used to determine + whether or not to trigger a build. + - If ignoredFiles is not empty, then we ignore any files that match any of the ignored_file + globs. 
If the change has no files that are outside of the ignoredFiles globs,
    then we do not trigger a build.
  returned: success
  type: list
includedFiles:
  description:
  - ignoredFiles and includedFiles are file glob matches using U(https://golang.org/pkg/path/filepath/#Match)
    extended with support for `**`.
  - If any of the files altered in the commit pass the ignoredFiles filter and includedFiles
    is empty, then as far as this filter is concerned, we should trigger the build.
  - If any of the files altered in the commit pass the ignoredFiles filter and includedFiles
    is not empty, then we make sure that at least one of those files matches an includedFiles
    glob. If not, then we do not trigger a build.
  returned: success
  type: list
triggerTemplate:
  description:
  - Template describing the types of source changes to trigger a build.
  - Branch and tag names in trigger templates are interpreted as regular expressions.
    Any branch or tag change that matches that regular expression will trigger a build.
  returned: success
  type: complex
  contains:
    projectId:
      description:
      - ID of the project that owns the Cloud Source Repository. If omitted, the project
        ID requesting the build is assumed.
      returned: success
      type: str
    repoName:
      description:
      - Name of the Cloud Source Repository. If omitted, the name "default" is assumed.
      returned: success
      type: str
    dir:
      description:
      - Directory, relative to the source root, in which to run the build.
      - This must be a relative path. If a step's dir is specified and is an absolute
        path, this value is ignored for that step's execution.
      returned: success
      type: str
    invertRegex:
      description:
      - Only trigger a build if the revision regex does NOT match the revision regex.
      returned: success
      type: bool
    branchName:
      description:
      - Name of the branch to build. Exactly one of branch name, tag, or commit
        SHA must be provided.
      - This field is a regular expression.
+ returned: success + type: str + tagName: + description: + - Name of the tag to build. Exactly one of a branch name, tag, or commit SHA + must be provided. + - This field is a regular expression. + returned: success + type: str + commitSha: + description: + - Explicit commit SHA to build. Exactly one of a branch name, tag, or commit + SHA must be provided. + returned: success + type: str +github: + description: + - Describes the configuration of a trigger that creates a build whenever a GitHub + event is received. + returned: success + type: complex + contains: + owner: + description: + - 'Owner of the repository. For example: The owner for U(https://github.com/googlecloudplatform/cloud-builders) + is "googlecloudplatform".' + returned: success + type: str + name: + description: + - 'Name of the repository. For example: The name for U(https://github.com/googlecloudplatform/cloud-builders) + is "cloud-builders".' + returned: success + type: str + pullRequest: + description: + - filter to match changes in pull requests. Specify only one of pullRequest + or push. + returned: success + type: complex + contains: + branch: + description: + - Regex of branches to match. + returned: success + type: str + commentControl: + description: + - Whether to block builds on a "/gcbrun" comment from a repository owner + or collaborator. + returned: success + type: str + invertRegex: + description: + - If true, branches that do NOT match the git_ref will trigger a build. + returned: success + type: bool + push: + description: + - filter to match changes in refs, like branches or tags. Specify only one of + pullRequest or push. + returned: success + type: complex + contains: + invertRegex: + description: + - When true, only trigger a build if the revision regex does NOT match the + git_ref regex. + returned: success + type: bool + branch: + description: + - Regex of branches to match. Specify only one of branch or tag. 
+ returned: success + type: str + tag: + description: + - Regex of tags to match. Specify only one of branch or tag. + returned: success + type: str +pubsubConfig: + description: + - PubsubConfig describes the configuration of a trigger that creates a build whenever + a Pub/Sub message is published. + returned: success + type: complex + contains: + subscription: + description: + - Output only. Name of the subscription. + returned: success + type: str + topic: + description: + - The name of the topic from which this subscription is receiving messages. + returned: success + type: str + service_account_email: + description: + - Service account that will make the push request. + returned: success + type: str + state: + description: + - Potential issues with the underlying Pub/Sub subscription configuration. + - Only populated on get requests. + returned: success + type: str +webhookConfig: + description: + - WebhookConfig describes the configuration of a trigger that creates a build whenever + a webhook is sent to a trigger's webhook URL. + returned: success + type: complex + contains: + secret: + description: + - Resource name for the secret required as a URL parameter. + returned: success + type: str + state: + description: + - Potential issues with the underlying Pub/Sub subscription configuration. + - Only populated on get requests. + returned: success + type: str +build: + description: + - Contents of the build template. Either a filename or build template must be provided. + returned: success + type: complex + contains: + source: + description: + - The location of the source files to build. + returned: success + type: complex + contains: + storageSource: + description: + - Location of the source in an archive file in Google Cloud Storage. + returned: success + type: complex + contains: + bucket: + description: + - Google Cloud Storage bucket containing the source. 
+ returned: success + type: str + object: + description: + - Google Cloud Storage object containing the source. + - This object must be a gzipped archive file (.tar.gz) containing source + to build. + returned: success + type: str + generation: + description: + - Google Cloud Storage generation for the object. If the generation + is omitted, the latest generation will be used . + returned: success + type: str + repoSource: + description: + - Location of the source in a Google Cloud Source Repository. + returned: success + type: complex + contains: + projectId: + description: + - ID of the project that owns the Cloud Source Repository. If omitted, + the project ID requesting the build is assumed. + returned: success + type: str + repoName: + description: + - Name of the Cloud Source Repository. + returned: success + type: str + dir: + description: + - Directory, relative to the source root, in which to run the build. + - This must be a relative path. If a step's dir is specified and is + an absolute path, this value is ignored for that step's execution. + returned: success + type: str + invertRegex: + description: + - Only trigger a build if the revision regex does NOT match the revision + regex. + returned: success + type: bool + substitutions: + description: + - Substitutions to use in a triggered build. Should only be used with + triggers.run . + returned: success + type: dict + branchName: + description: + - Regex matching branches to build. Exactly one a of branch name, tag, + or commit SHA must be provided. + - The syntax of the regular expressions accepted is the syntax accepted + by RE2 and described at U(https://github.com/google/re2/wiki/Syntax) + . + returned: success + type: str + tagName: + description: + - Regex matching tags to build. Exactly one a of branch name, tag, or + commit SHA must be provided. + - The syntax of the regular expressions accepted is the syntax accepted + by RE2 and described at U(https://github.com/google/re2/wiki/Syntax) + . 
              returned: success
              type: str
            commitSha:
              description:
              - Explicit commit SHA to build. Exactly one of branch name, tag, or
                commit SHA must be provided.
              returned: success
              type: str
    tags:
      description:
      - Tags for annotation of a Build. These are not docker tags.
      returned: success
      type: list
    images:
      description:
      - A list of images to be pushed upon the successful completion of all build
        steps.
      - The images are pushed using the builder service account's credentials.
      - The digests of the pushed images will be stored in the Build resource's results
        field.
      - If any of the images fail to be pushed, the build status is marked FAILURE.
      returned: success
      type: list
    substitutions:
      description:
      - Substitutions data for Build resource.
      returned: success
      type: dict
    queueTtl:
      description:
      - TTL in queue for this build. If provided and the build is enqueued longer
        than this value, the build will expire and the build status will be EXPIRED.
      - The TTL starts ticking from createTime.
      - 'A duration in seconds with up to nine fractional digits, terminated by ''s''.
        Example: "3.5s".'
      returned: success
      type: str
    logsBucket:
      description:
      - Google Cloud Storage bucket where logs should be written. Logs file names
        will be of the format ${logsBucket}/log-${build_id}.txt.
      returned: success
      type: str
    timeout:
      description:
      - Amount of time that this build should be allowed to run, to second granularity.
      - If this amount of time elapses, work on the build will cease and the build
        status will be TIMEOUT.
      - This timeout must be equal to or greater than the sum of the timeouts for
        build steps within the build.
      - The expected format is the number of seconds followed by s.
      - Default time is ten minutes (600s).
      returned: success
      type: str
    secrets:
      description:
      - Secrets to decrypt using Cloud Key Management Service.
+ returned: success + type: complex + contains: + kmsKeyName: + description: + - Cloud KMS key name to use to decrypt these envs. + returned: success + type: str + secretEnv: + description: + - Map of environment variable name to its encrypted value. + - Secret environment variables must be unique across all of a build's secrets, + and must be used by at least one build step. Values can be at most 64 + KB in size. There can be at most 100 secret values across all of a build's + secrets. + returned: success + type: dict + steps: + description: + - The operations to be performed on the workspace. + returned: success + type: complex + contains: + name: + description: + - The name of the container image that will run this particular build step. + - If the image is available in the host's Docker daemon's cache, it will + be run directly. If not, the host will attempt to pull the image first, + using the builder service account's credentials if necessary. + - The Docker daemon's cache will already have the latest versions of all + of the officially supported build steps (see U(https://github.com/GoogleCloudPlatform/cloud-builders) + for images and examples). + - The Docker daemon will also have cached many of the layers for some popular + images, like "ubuntu", "debian", but they will be refreshed at the time + you attempt to use them. + - If you built an image in a previous build step, it will be stored in the + host's Docker daemon's cache and is available to use as the name for a + later build step. + returned: success + type: str + args: + description: + - A list of arguments that will be presented to the step when it is started. + - If the image used to run the step's container has an entrypoint, the args + are used as arguments to that entrypoint. If the image does not define + an entrypoint, the first element in args is used as the entrypoint, and + the remainder will be used as arguments. 
+ returned: success + type: list + env: + description: + - A list of environment variable definitions to be used when running a step. + - The elements are of the form "KEY=VALUE" for the environment variable + "KEY" being given the value "VALUE". + returned: success + type: list + id: + description: + - Unique identifier for this build step, used in `wait_for` to reference + this build step as a dependency. + returned: success + type: str + entrypoint: + description: + - Entrypoint to be used instead of the build step image's default entrypoint. + - If unset, the image's default entrypoint is used . + returned: success + type: str + dir: + description: + - Working directory to use when running this step's container. + - If this value is a relative path, it is relative to the build's working + directory. If this value is absolute, it may be outside the build's working + directory, in which case the contents of the path may not be persisted + across build step executions, unless a `volume` for that path is specified. + - If the build specifies a `RepoSource` with `dir` and a step with a `dir`, + which specifies an absolute path, the `RepoSource` `dir` is ignored for + the step's execution. + returned: success + type: str + secretEnv: + description: + - A list of environment variables which are encrypted using a Cloud Key + Management Service crypto key. These values must be specified in the build's + `Secret`. + returned: success + type: list + timeout: + description: + - Time limit for executing this build step. If not defined, the step has + no time limit and will be allowed to continue to run until either it completes + or the build itself times out. + returned: success + type: str + timing: + description: + - Output only. Stores timing information for executing this build step. + returned: success + type: str + volumes: + description: + - List of volumes to mount into the build step. 
+ - Each volume is created as an empty volume prior to execution of the build + step. Upon completion of the build, volumes and their contents are discarded. + - Using a named volume in only one step is not valid as it is indicative + of a build request with an incorrect configuration. + returned: success + type: complex + contains: + name: + description: + - Name of the volume to mount. + - Volume names must be unique per build step and must be valid names + for Docker volumes. Each named volume must be used by at least two + build steps. + returned: success + type: str + path: + description: + - Path at which to mount the volume. + - Paths must be absolute and cannot conflict with other volume paths + on the same build step or with certain reserved volume paths. + returned: success + type: str + waitFor: + description: + - The ID(s) of the step(s) that this build step depends on. + - This build step will not start until all the build steps in `wait_for` + have completed successfully. If `wait_for` is empty, this build step will + start when all previous build steps in the `Build.Steps` list have completed + successfully. + returned: success + type: list + artifacts: + description: + - Artifacts produced by the build that should be uploaded upon successful completion + of all build steps. + returned: success + type: complex + contains: + images: + description: + - A list of images to be pushed upon the successful completion of all build + steps. + - The images will be pushed using the builder service account's credentials. + - The digests of the pushed images will be stored in the Build resource's + results field. + - If any of the images fail to be pushed, the build is marked FAILURE. + returned: success + type: list + objects: + description: + - A list of objects to be uploaded to Cloud Storage upon successful completion + of all build steps. 
+ - Files in the workspace matching specified paths globs will be uploaded + to the Cloud Storage location using the builder service account's credentials. + - The location and generation of the uploaded objects will be stored in + the Build resource's results field. + - If any objects fail to be pushed, the build is marked FAILURE. + returned: success + type: complex + contains: + location: + description: + - Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". + - Files in the workspace matching any path pattern will be uploaded + to Cloud Storage with this location as a prefix. + returned: success + type: str + paths: + description: + - Path globs used to match files in the build's workspace. + returned: success + type: list + timing: + description: + - Output only. Stores timing information for pushing all artifact objects. + returned: success + type: complex + contains: + startTime: + description: + - Start of time span. + - 'A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution + and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" + and "2014-10-02T15:01:23.045123456Z".' + returned: success + type: str + endTime: + description: + - End of time span. + - 'A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution + and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" + and "2014-10-02T15:01:23.045123456Z".' + returned: success + type: str + options: + description: + - Special options for this build. + returned: success + type: complex + contains: + sourceProvenanceHash: + description: + - Requested hash for SourceProvenance. + returned: success + type: list + requestedVerifyOption: + description: + - Requested verifiability options. + returned: success + type: str + machineType: + description: + - Compute Engine machine type on which to run the build. + returned: success + type: str + diskSizeGb: + description: + - Requested disk size for the VM that runs the build. 
Note that this is + NOT "disk free"; some of the space will be used by the operating system + and build utilities. + - Also note that this is the minimum disk size that will be allocated for + the build -- the build may run with a larger disk than requested. At present, + the maximum disk size is 1000GB; builds that request more than the maximum + are rejected with an error. + returned: success + type: int + substitutionOption: + description: + - Option to specify behavior when there is an error in the substitution + checks. + - NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot + be overridden in the build configuration file. + returned: success + type: str + dynamicSubstitutions: + description: + - Option to specify whether or not to apply bash style string operations + to the substitutions. + - NOTE this is always enabled for triggered builds and cannot be overridden + in the build configuration file. + returned: success + type: bool + logStreamingOption: + description: + - Option to define build log streaming behavior to Google Cloud Storage. + returned: success + type: str + workerPool: + description: + - Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} + This field is experimental. + returned: success + type: str + logging: + description: + - Option to specify the logging mode, which determines if and where build + logs are stored. + returned: success + type: str + env: + description: + - A list of global environment variable definitions that will exist for + all build steps in this build. If a variable is defined in both globally + and in a build step, the variable will use the build step value. + - The elements are of the form "KEY=VALUE" for the environment variable + "KEY" being given the value "VALUE". + returned: success + type: list + secretEnv: + description: + - A list of global environment variables, which are encrypted using a Cloud + Key Management Service crypto key. 
These values must be specified in the + build's Secret. These variables will be available to all build steps in + this build. + returned: success + type: list + volumes: + description: + - Global list of volumes to mount for ALL build steps Each volume is created + as an empty volume prior to starting the build process. + - Upon completion of the build, volumes and their contents are discarded. + Global volume names and paths cannot conflict with the volumes defined + a build step. + - Using a global volume in a build with only one step is not valid as it + is indicative of a build request with an incorrect configuration. + returned: success + type: complex + contains: + name: + description: + - Name of the volume to mount. + - Volume names must be unique per build step and must be valid names + for Docker volumes. + - Each named volume must be used by at least two build steps. + returned: success + type: str + path: + description: + - Path at which to mount the volume. + - Paths must be absolute and cannot conflict with other volume paths + on the same build step or with certain reserved volume paths. 
              returned: success
              type: str
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
    navigate_hash,
    GcpSession,
    GcpModule,
    GcpRequest,
    remove_nones_from_dict,
    replace_resource_dict,
)
import json

################################################################################
# Main
################################################################################


def main():
    """Main function.

    Declares the full argument spec for the Cloud Build trigger resource,
    defaults the OAuth scopes, then reconciles the desired state against the
    live resource: create when absent, patch when different, delete when
    state=absent. Exits via module.exit_json with the resource plus 'changed'.
    """

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            id=dict(type='str'),
            name=dict(type='str'),
            description=dict(type='str'),
            tags=dict(type='list', elements='str'),
            disabled=dict(type='bool'),
            substitutions=dict(type='dict'),
            filename=dict(type='str'),
            ignored_files=dict(type='list', elements='str'),
            included_files=dict(type='list', elements='str'),
            trigger_template=dict(
                type='dict',
                options=dict(
                    project_id=dict(type='str'),
                    repo_name=dict(default='default', type='str'),
                    dir=dict(type='str'),
                    invert_regex=dict(type='bool'),
                    branch_name=dict(type='str'),
                    tag_name=dict(type='str'),
                    commit_sha=dict(type='str'),
                ),
            ),
            github=dict(
                type='dict',
                options=dict(
                    owner=dict(type='str'),
                    name=dict(type='str'),
                    pull_request=dict(
                        type='dict', options=dict(branch=dict(required=True, type='str'), comment_control=dict(type='str'), invert_regex=dict(type='bool'))
                    ),
                    push=dict(type='dict', options=dict(invert_regex=dict(type='bool'), branch=dict(type='str'), tag=dict(type='str'))),
                ),
            ),
            pubsub_config=dict(type='dict', options=dict(topic=dict(required=True, type='str'), service_account_email=dict(type='str'))),
            webhook_config=dict(type='dict', options=dict(secret=dict(required=True, type='str'))),
            build=dict(
                type='dict',
                options=dict(
                    source=dict(
                        type='dict',
                        options=dict(
                            storage_source=dict(
                                type='dict',
                                options=dict(bucket=dict(required=True, type='str'), object=dict(required=True, type='str'), generation=dict(type='str')),
                            ),
                            repo_source=dict(
                                type='dict',
                                options=dict(
                                    project_id=dict(type='str'),
                                    repo_name=dict(required=True, type='str'),
                                    dir=dict(type='str'),
                                    invert_regex=dict(type='bool'),
                                    substitutions=dict(type='dict'),
                                    branch_name=dict(type='str'),
                                    tag_name=dict(type='str'),
                                    commit_sha=dict(type='str'),
                                ),
                            ),
                        ),
                    ),
                    tags=dict(type='list', elements='str'),
                    images=dict(type='list', elements='str'),
                    substitutions=dict(type='dict'),
                    queue_ttl=dict(type='str'),
                    logs_bucket=dict(type='str'),
                    timeout=dict(default='600s', type='str'),
                    secrets=dict(type='list', elements='dict', options=dict(kms_key_name=dict(required=True, type='str'), secret_env=dict(type='dict'))),
                    steps=dict(
                        required=True,
                        type='list',
                        elements='dict',
                        options=dict(
                            name=dict(required=True, type='str'),
                            args=dict(type='list', elements='str'),
                            env=dict(type='list', elements='str'),
                            id=dict(type='str'),
                            entrypoint=dict(type='str'),
                            dir=dict(type='str'),
                            secret_env=dict(type='list', elements='str'),
                            timeout=dict(type='str'),
                            timing=dict(type='str'),
                            volumes=dict(
                                type='list', elements='dict', options=dict(name=dict(required=True, type='str'), path=dict(required=True, type='str'))
                            ),
                            wait_for=dict(type='list', elements='str'),
                        ),
                    ),
                    artifacts=dict(
                        type='dict',
                        options=dict(
                            images=dict(type='list', elements='str'),
                            objects=dict(type='dict', options=dict(location=dict(type='str'), paths=dict(type='list', elements='str'))),
                        ),
                    ),
                    options=dict(
                        type='dict',
                        options=dict(
                            source_provenance_hash=dict(type='list', elements='str'),
                            requested_verify_option=dict(type='str'),
                            machine_type=dict(type='str'),
                            disk_size_gb=dict(type='int'),
                            substitution_option=dict(type='str'),
                            dynamic_substitutions=dict(type='bool'),
                            log_streaming_option=dict(type='str'),
                            worker_pool=dict(type='str'),
                            logging=dict(type='str'),
                            env=dict(type='list', elements='str'),
                            secret_env=dict(type='list', elements='str'),
                            volumes=dict(type='list', elements='dict', options=dict(name=dict(type='str'), path=dict(type='str'))),
                        ),
                    ),
                ),
            ),
        )
    )

    # Default to the broad cloud-platform scope when the user supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    state = module.params['state']

    fetch = fetch_resource(module, self_link(module))
    changed = False

    if fetch:
        if state == 'present':
            # Resource exists: patch it only when the desired config differs,
            # then re-fetch so exit_json reports the post-update state.
            if is_different(module, fetch):
                update(module, self_link(module))
                fetch = fetch_resource(module, self_link(module))
                changed = True
        else:
            delete(module, self_link(module))
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module))
            changed = True
        else:
            # Absent and not found: nothing to do.
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link):
    """Create the trigger by POSTing the request body to the collection URL."""
    auth = GcpSession(module, 'cloudbuild')
    return return_if_object(module, auth.post(link, resource_to_request(module)))


def update(module, link):
    """Update the trigger in place by PATCHing its self link."""
    auth = GcpSession(module, 'cloudbuild')
    return return_if_object(module, auth.patch(link, resource_to_request(module)))


def delete(module, link):
    """Delete the trigger via its self link."""
    auth = GcpSession(module, 'cloudbuild')
    return return_if_object(module, auth.delete(link))


def resource_to_request(module):
    """Build the API request body from module params.

    Maps snake_case Ansible options to the API's camelCase fields, delegating
    nested structures to the Trigger* helper classes.
    """
    request = {
        u'id': module.params.get('id'),
        u'name': module.params.get('name'),
        u'description': module.params.get('description'),
        u'tags': module.params.get('tags'),
        u'disabled': module.params.get('disabled'),
        u'substitutions': module.params.get('substitutions'),
        u'filename': module.params.get('filename'),
        u'ignoredFiles': module.params.get('ignored_files'),
        u'includedFiles': module.params.get('included_files'),
        u'triggerTemplate': TriggerTriggertemplate(module.params.get('trigger_template', {}), module).to_request(),
        u'github': TriggerGithub(module.params.get('github', {}), module).to_request(),
        u'pubsubConfig': TriggerPubsubconfig(module.params.get('pubsub_config', {}), module).to_request(),
        u'webhookConfig': TriggerWebhookconfig(module.params.get('webhook_config', {}), module).to_request(),
        u'build': TriggerBuild(module.params.get('build', {}), module).to_request(),
    }
    return_vals = {}
    # Keep only truthy values; an explicit False is preserved so boolean
    # fields like 'disabled' can be sent. NOTE(review): this also drops 0 and
    # empty strings from the request — inherited generated-code behavior.
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, allow_not_found=True):
    """GET the resource at link; by default a 404 yields None instead of failing."""
    auth = GcpSession(module, 'cloudbuild')
    return return_if_object(module, auth.get(link), allow_not_found)


def self_link(module):
    """URL of this specific trigger (keyed by project and trigger id)."""
    return "https://cloudbuild.googleapis.com/v1/projects/{project}/triggers/{id}".format(**module.params)


def collection(module):
    """URL of the project's trigger collection (used for create)."""
    return "https://cloudbuild.googleapis.com/v1/projects/{project}/triggers".format(**module.params)


def return_if_object(module, response, allow_not_found=False):
    """Decode an API response, failing the module on errors.

    Returns None for 404 (when allowed) and 204; otherwise raises module
    failure on HTTP errors, invalid JSON, or an error payload in the body.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        # json.decoder.JSONDecodeError does not exist on Python 2; fall back
        # to ValueError, its base class.
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True when the desired config differs from the live resource.

    Compares only the keys present in both the request and the (normalized)
    response, so output-only and unset fields do not cause spurious diffs.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Project the API response down to the fields this module manages."""
    return {
        u'id': response.get(u'id'),
        u'name': response.get(u'name'),
        u'description': response.get(u'description'),
        u'tags': response.get(u'tags'),
        u'disabled': response.get(u'disabled'),
        u'createTime': response.get(u'createTime'),
        u'substitutions': response.get(u'substitutions'),
        u'filename': response.get(u'filename'),
        u'ignoredFiles': response.get(u'ignoredFiles'),
        u'includedFiles': response.get(u'includedFiles'),
        u'triggerTemplate': TriggerTriggertemplate(response.get(u'triggerTemplate', {}), module).from_response(),
        u'github': TriggerGithub(response.get(u'github', {}), module).from_response(),
        u'pubsubConfig': TriggerPubsubconfig(response.get(u'pubsubConfig', {}), module).from_response(),
        u'webhookConfig': TriggerWebhookconfig(response.get(u'webhookConfig', {}), module).from_response(),
        u'build': TriggerBuild(response.get(u'build', {}), module).from_response(),
    }


# Translates the trigger_template sub-object between Ansible's snake_case
# params (to_request) and the API's camelCase fields (from_response).
class TriggerTriggertemplate(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'projectId': self.request.get('project_id'),
                u'repoName': self.request.get('repo_name'),
                u'dir': self.request.get('dir'),
                u'invertRegex': self.request.get('invert_regex'),
                u'branchName': self.request.get('branch_name'),
                u'tagName': self.request.get('tag_name'),
                u'commitSha': self.request.get('commit_sha'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'projectId':
self.request.get(u'projectId'), + u'repoName': self.request.get(u'repoName'), + u'dir': self.request.get(u'dir'), + u'invertRegex': self.request.get(u'invertRegex'), + u'branchName': self.request.get(u'branchName'), + u'tagName': self.request.get(u'tagName'), + u'commitSha': self.request.get(u'commitSha'), + } + ) + + +class TriggerGithub(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'owner': self.request.get('owner'), + u'name': self.request.get('name'), + u'pullRequest': TriggerPullrequest(self.request.get('pull_request', {}), self.module).to_request(), + u'push': TriggerPush(self.request.get('push', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'owner': self.request.get(u'owner'), + u'name': self.request.get(u'name'), + u'pullRequest': TriggerPullrequest(self.request.get(u'pullRequest', {}), self.module).from_response(), + u'push': TriggerPush(self.request.get(u'push', {}), self.module).from_response(), + } + ) + + +class TriggerPullrequest(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'branch': self.request.get('branch'), u'commentControl': self.request.get('comment_control'), u'invertRegex': self.request.get('invert_regex')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'branch': self.request.get(u'branch'), u'commentControl': self.request.get(u'commentControl'), u'invertRegex': self.request.get(u'invertRegex')} + ) + + +class TriggerPush(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'invertRegex': 
self.request.get('invert_regex'), u'branch': self.request.get('branch'), u'tag': self.request.get('tag')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'invertRegex': self.request.get(u'invertRegex'), u'branch': self.request.get(u'branch'), u'tag': self.request.get(u'tag')} + ) + + +class TriggerPubsubconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'topic': self.request.get('topic'), u'service_account_email': self.request.get('service_account_email')}) + + def from_response(self): + return remove_nones_from_dict({u'topic': self.request.get(u'topic'), u'service_account_email': self.request.get(u'service_account_email')}) + + +class TriggerWebhookconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'secret': self.request.get('secret')}) + + def from_response(self): + return remove_nones_from_dict({u'secret': self.request.get(u'secret')}) + + +class TriggerBuild(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'source': TriggerSource(self.request.get('source', {}), self.module).to_request(), + u'tags': self.request.get('tags'), + u'images': self.request.get('images'), + u'substitutions': self.request.get('substitutions'), + u'queueTtl': self.request.get('queue_ttl'), + u'logsBucket': self.request.get('logs_bucket'), + u'timeout': self.request.get('timeout'), + u'secrets': TriggerSecretsArray(self.request.get('secrets', []), self.module).to_request(), + u'steps': TriggerStepsArray(self.request.get('steps', []), self.module).to_request(), + u'artifacts': 
TriggerArtifacts(self.request.get('artifacts', {}), self.module).to_request(), + u'options': TriggerOptions(self.request.get('options', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'source': TriggerSource(self.request.get(u'source', {}), self.module).from_response(), + u'tags': self.request.get(u'tags'), + u'images': self.request.get(u'images'), + u'substitutions': self.request.get(u'substitutions'), + u'queueTtl': self.request.get(u'queueTtl'), + u'logsBucket': self.request.get(u'logsBucket'), + u'timeout': self.request.get(u'timeout'), + u'secrets': TriggerSecretsArray(self.request.get(u'secrets', []), self.module).from_response(), + u'steps': TriggerStepsArray(self.request.get(u'steps', []), self.module).from_response(), + u'artifacts': TriggerArtifacts(self.request.get(u'artifacts', {}), self.module).from_response(), + u'options': TriggerOptions(self.request.get(u'options', {}), self.module).from_response(), + } + ) + + +class TriggerSource(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'storageSource': TriggerStoragesource(self.request.get('storage_source', {}), self.module).to_request(), + u'repoSource': TriggerReposource(self.request.get('repo_source', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'storageSource': TriggerStoragesource(self.request.get(u'storageSource', {}), self.module).from_response(), + u'repoSource': TriggerReposource(self.request.get(u'repoSource', {}), self.module).from_response(), + } + ) + + +class TriggerStoragesource(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'bucket': self.request.get('bucket'), 
u'object': self.request.get('object'), u'generation': self.request.get('generation')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'bucket': self.request.get(u'bucket'), u'object': self.request.get(u'object'), u'generation': self.request.get(u'generation')} + ) + + +class TriggerReposource(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'projectId': self.request.get('project_id'), + u'repoName': self.request.get('repo_name'), + u'dir': self.request.get('dir'), + u'invertRegex': self.request.get('invert_regex'), + u'substitutions': self.request.get('substitutions'), + u'branchName': self.request.get('branch_name'), + u'tagName': self.request.get('tag_name'), + u'commitSha': self.request.get('commit_sha'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'projectId': self.request.get(u'projectId'), + u'repoName': self.request.get(u'repoName'), + u'dir': self.request.get(u'dir'), + u'invertRegex': self.request.get(u'invertRegex'), + u'substitutions': self.request.get(u'substitutions'), + u'branchName': self.request.get(u'branchName'), + u'tagName': self.request.get(u'tagName'), + u'commitSha': self.request.get(u'commitSha'), + } + ) + + +class TriggerSecretsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'kmsKeyName': item.get('kms_key_name'), u'secretEnv': item.get('secret_env')}) + + def _response_from_item(self, item): + return 
remove_nones_from_dict({u'kmsKeyName': item.get(u'kmsKeyName'), u'secretEnv': item.get(u'secretEnv')}) + + +class TriggerStepsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'name': item.get('name'), + u'args': item.get('args'), + u'env': item.get('env'), + u'id': item.get('id'), + u'entrypoint': item.get('entrypoint'), + u'dir': item.get('dir'), + u'secretEnv': item.get('secret_env'), + u'timeout': item.get('timeout'), + u'timing': item.get('timing'), + u'volumes': TriggerVolumesArray(item.get('volumes', []), self.module).to_request(), + u'waitFor': item.get('wait_for'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'name': item.get(u'name'), + u'args': item.get(u'args'), + u'env': item.get(u'env'), + u'id': item.get(u'id'), + u'entrypoint': item.get(u'entrypoint'), + u'dir': item.get(u'dir'), + u'secretEnv': item.get(u'secretEnv'), + u'timeout': item.get(u'timeout'), + u'timing': item.get(u'timing'), + u'volumes': TriggerVolumesArray(item.get(u'volumes', []), self.module).from_response(), + u'waitFor': item.get(u'waitFor'), + } + ) + + +class TriggerVolumesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + 
return remove_nones_from_dict({u'name': item.get('name'), u'path': item.get('path')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'name': item.get(u'name'), u'path': item.get(u'path')}) + + +class TriggerArtifacts(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'images': self.request.get('images'), u'objects': TriggerObjects(self.request.get('objects', {}), self.module).to_request()} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'images': self.request.get(u'images'), u'objects': TriggerObjects(self.request.get(u'objects', {}), self.module).from_response()} + ) + + +class TriggerObjects(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'location': self.request.get('location'), u'paths': self.request.get('paths')}) + + def from_response(self): + return remove_nones_from_dict({u'location': self.request.get(u'location'), u'paths': self.request.get(u'paths')}) + + +class TriggerTiming(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'startTime': self.request.get('start_time'), u'endTime': self.request.get('end_time')}) + + def from_response(self): + return remove_nones_from_dict({u'startTime': self.request.get(u'startTime'), u'endTime': self.request.get(u'endTime')}) + + +class TriggerOptions(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'sourceProvenanceHash': self.request.get('source_provenance_hash'), + 
u'requestedVerifyOption': self.request.get('requested_verify_option'), + u'machineType': self.request.get('machine_type'), + u'diskSizeGb': self.request.get('disk_size_gb'), + u'substitutionOption': self.request.get('substitution_option'), + u'dynamicSubstitutions': self.request.get('dynamic_substitutions'), + u'logStreamingOption': self.request.get('log_streaming_option'), + u'workerPool': self.request.get('worker_pool'), + u'logging': self.request.get('logging'), + u'env': self.request.get('env'), + u'secretEnv': self.request.get('secret_env'), + u'volumes': TriggerVolumesArray(self.request.get('volumes', []), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'sourceProvenanceHash': self.request.get(u'sourceProvenanceHash'), + u'requestedVerifyOption': self.request.get(u'requestedVerifyOption'), + u'machineType': self.request.get(u'machineType'), + u'diskSizeGb': self.request.get(u'diskSizeGb'), + u'substitutionOption': self.request.get(u'substitutionOption'), + u'dynamicSubstitutions': self.request.get(u'dynamicSubstitutions'), + u'logStreamingOption': self.request.get(u'logStreamingOption'), + u'workerPool': self.request.get(u'workerPool'), + u'logging': self.request.get(u'logging'), + u'env': self.request.get(u'env'), + u'secretEnv': self.request.get(u'secretEnv'), + u'volumes': TriggerVolumesArray(self.request.get(u'volumes', []), self.module).from_response(), + } + ) + + +class TriggerVolumesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'name': item.get('name'), u'path': 
item.get('path')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'name': item.get(u'name'), u'path': item.get(u'path')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_cloudbuild_trigger_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_cloudbuild_trigger_info.py new file mode 100644 index 000000000..78c4990aa --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_cloudbuild_trigger_info.py @@ -0,0 +1,850 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_cloudbuild_trigger_info +description: +- Gather info for GCP Trigger +short_description: Gather info for GCP Trigger +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. 
+ type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a trigger + gcp_cloudbuild_trigger_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + id: + description: + - The unique identifier for the trigger. + returned: success + type: str + name: + description: + - Name of the trigger. Must be unique within the project. + returned: success + type: str + description: + description: + - Human-readable description of the trigger. + returned: success + type: str + tags: + description: + - Tags for annotation of a BuildTrigger . + returned: success + type: list + disabled: + description: + - Whether the trigger is disabled or not. If true, the trigger will never result + in a build. + returned: success + type: bool + createTime: + description: + - Time when the trigger was created. + returned: success + type: str + substitutions: + description: + - Substitutions data for Build resource. + returned: success + type: dict + filename: + description: + - Path, from the source root, to a file whose contents is used for the template. + Either a filename or build template must be provided. + returned: success + type: str + ignoredFiles: + description: + - ignoredFiles and includedFiles are file glob matches using U(https://golang.org/pkg/path/filepath/#Match) + extended with support for `**`. + - If ignoredFiles and changed files are both empty, then they are not used to + determine whether or not to trigger a build. + - If ignoredFiles is not empty, then we ignore any files that match any of the + ignored_file globs. If the change has no files that are outside of the ignoredFiles + globs, then we do not trigger a build. + returned: success + type: list + includedFiles: + description: + - ignoredFiles and includedFiles are file glob matches using U(https://golang.org/pkg/path/filepath/#Match) + extended with support for `**`. 
+ - If any of the files altered in the commit pass the ignoredFiles filter and + includedFiles is empty, then as far as this filter is concerned, we should + trigger the build. + - If any of the files altered in the commit pass the ignoredFiles filter and + includedFiles is not empty, then we make sure that at least one of those files + matches an includedFiles glob. If not, then we do not trigger a build. + returned: success + type: list + triggerTemplate: + description: + - Template describing the types of source changes to trigger a build. + - Branch and tag names in trigger templates are interpreted as regular expressions. + Any branch or tag change that matches that regular expression will trigger + a build. + returned: success + type: complex + contains: + projectId: + description: + - ID of the project that owns the Cloud Source Repository. If omitted, the + project ID requesting the build is assumed. + returned: success + type: str + repoName: + description: + - Name of the Cloud Source Repository. If omitted, the name "default" is + assumed. + returned: success + type: str + dir: + description: + - Directory, relative to the source root, in which to run the build. + - This must be a relative path. If a step's dir is specified and is an absolute + path, this value is ignored for that step's execution. + returned: success + type: str + invertRegex: + description: + - Only trigger a build if the revision does NOT match the revision + regex. + returned: success + type: bool + branchName: + description: + - Name of the branch to build. Exactly one of a branch name, tag, or commit + SHA must be provided. + - This field is a regular expression. + returned: success + type: str + tagName: + description: + - Name of the tag to build. Exactly one of a branch name, tag, or commit + SHA must be provided. + - This field is a regular expression. + returned: success + type: str + commitSha: + description: + - Explicit commit SHA to build. 
Exactly one of a branch name, tag, or commit + SHA must be provided. + returned: success + type: str + github: + description: + - Describes the configuration of a trigger that creates a build whenever a GitHub + event is received. + returned: success + type: complex + contains: + owner: + description: + - 'Owner of the repository. For example: The owner for U(https://github.com/googlecloudplatform/cloud-builders) + is "googlecloudplatform".' + returned: success + type: str + name: + description: + - 'Name of the repository. For example: The name for U(https://github.com/googlecloudplatform/cloud-builders) + is "cloud-builders".' + returned: success + type: str + pullRequest: + description: + - filter to match changes in pull requests. Specify only one of pullRequest + or push. + returned: success + type: complex + contains: + branch: + description: + - Regex of branches to match. + returned: success + type: str + commentControl: + description: + - Whether to block builds on a "/gcbrun" comment from a repository owner + or collaborator. + returned: success + type: str + invertRegex: + description: + - If true, branches that do NOT match the git_ref will trigger a build. + returned: success + type: bool + push: + description: + - filter to match changes in refs, like branches or tags. Specify only one + of pullRequest or push. + returned: success + type: complex + contains: + invertRegex: + description: + - When true, only trigger a build if the revision regex does NOT match + the git_ref regex. + returned: success + type: bool + branch: + description: + - Regex of branches to match. Specify only one of branch or tag. + returned: success + type: str + tag: + description: + - Regex of tags to match. Specify only one of branch or tag. + returned: success + type: str + pubsubConfig: + description: + - PubsubConfig describes the configuration of a trigger that creates a build + whenever a Pub/Sub message is published. 
+ returned: success + type: complex + contains: + subscription: + description: + - Output only. Name of the subscription. + returned: success + type: str + topic: + description: + - The name of the topic from which this subscription is receiving messages. + returned: success + type: str + service_account_email: + description: + - Service account that will make the push request. + returned: success + type: str + state: + description: + - Potential issues with the underlying Pub/Sub subscription configuration. + - Only populated on get requests. + returned: success + type: str + webhookConfig: + description: + - WebhookConfig describes the configuration of a trigger that creates a build + whenever a webhook is sent to a trigger's webhook URL. + returned: success + type: complex + contains: + secret: + description: + - Resource name for the secret required as a URL parameter. + returned: success + type: str + state: + description: + - Potential issues with the underlying Pub/Sub subscription configuration. + - Only populated on get requests. + returned: success + type: str + build: + description: + - Contents of the build template. Either a filename or build template must be + provided. + returned: success + type: complex + contains: + source: + description: + - The location of the source files to build. + returned: success + type: complex + contains: + storageSource: + description: + - Location of the source in an archive file in Google Cloud Storage. + returned: success + type: complex + contains: + bucket: + description: + - Google Cloud Storage bucket containing the source. + returned: success + type: str + object: + description: + - Google Cloud Storage object containing the source. + - This object must be a gzipped archive file (.tar.gz) containing + source to build. + returned: success + type: str + generation: + description: + - Google Cloud Storage generation for the object. If the generation + is omitted, the latest generation will be used . 
+ returned: success + type: str + repoSource: + description: + - Location of the source in a Google Cloud Source Repository. + returned: success + type: complex + contains: + projectId: + description: + - ID of the project that owns the Cloud Source Repository. If omitted, + the project ID requesting the build is assumed. + returned: success + type: str + repoName: + description: + - Name of the Cloud Source Repository. + returned: success + type: str + dir: + description: + - Directory, relative to the source root, in which to run the build. + - This must be a relative path. If a step's dir is specified and + is an absolute path, this value is ignored for that step's execution. + returned: success + type: str + invertRegex: + description: + - Only trigger a build if the revision does NOT match the + revision regex. + returned: success + type: bool + substitutions: + description: + - Substitutions to use in a triggered build. Should only be used + with triggers.run . + returned: success + type: dict + branchName: + description: + - Regex matching branches to build. Exactly one of a branch name, + tag, or commit SHA must be provided. + - The syntax of the regular expressions accepted is the syntax accepted + by RE2 and described at U(https://github.com/google/re2/wiki/Syntax) + . + returned: success + type: str + tagName: + description: + - Regex matching tags to build. Exactly one of a branch name, tag, + or commit SHA must be provided. + - The syntax of the regular expressions accepted is the syntax accepted + by RE2 and described at U(https://github.com/google/re2/wiki/Syntax) + . + returned: success + type: str + commitSha: + description: + - Explicit commit SHA to build. Exactly one of a branch name, tag, + or commit SHA must be provided. + returned: success + type: str + tags: + description: + - Tags for annotation of a Build. These are not docker tags. 
+ returned: success + type: list + images: + description: + - A list of images to be pushed upon the successful completion of all build + steps. + - The images are pushed using the builder service account's credentials. + - The digests of the pushed images will be stored in the Build resource's + results field. + - If any of the images fail to be pushed, the build status is marked FAILURE. + returned: success + type: list + substitutions: + description: + - Substitutions data for Build resource. + returned: success + type: dict + queueTtl: + description: + - TTL in queue for this build. If provided and the build is enqueued longer + than this value, the build will expire and the build status will be EXPIRED. + - The TTL starts ticking from createTime. + - 'A duration in seconds with up to nine fractional digits, terminated by + ''s''. Example: "3.5s".' + returned: success + type: str + logsBucket: + description: + - Google Cloud Storage bucket where logs should be written. Logs file names + will be of the format ${logsBucket}/log-${build_id}.txt. + returned: success + type: str + timeout: + description: + - Amount of time that this build should be allowed to run, to second granularity. + - If this amount of time elapses, work on the build will cease and the build + status will be TIMEOUT. + - This timeout must be equal to or greater than the sum of the timeouts + for build steps within the build. + - The expected format is the number of seconds followed by s. + - Default time is ten minutes (600s). + returned: success + type: str + secrets: + description: + - Secrets to decrypt using Cloud Key Management Service. + returned: success + type: complex + contains: + kmsKeyName: + description: + - Cloud KMS key name to use to decrypt these envs. + returned: success + type: str + secretEnv: + description: + - Map of environment variable name to its encrypted value. 
+ - Secret environment variables must be unique across all of a build's + secrets, and must be used by at least one build step. Values can be + at most 64 KB in size. There can be at most 100 secret values across + all of a build's secrets. + returned: success + type: dict + steps: + description: + - The operations to be performed on the workspace. + returned: success + type: complex + contains: + name: + description: + - The name of the container image that will run this particular build + step. + - If the image is available in the host's Docker daemon's cache, it + will be run directly. If not, the host will attempt to pull the image + first, using the builder service account's credentials if necessary. + - The Docker daemon's cache will already have the latest versions of + all of the officially supported build steps (see U(https://github.com/GoogleCloudPlatform/cloud-builders) + for images and examples). + - The Docker daemon will also have cached many of the layers for some + popular images, like "ubuntu", "debian", but they will be refreshed + at the time you attempt to use them. + - If you built an image in a previous build step, it will be stored + in the host's Docker daemon's cache and is available to use as the + name for a later build step. + returned: success + type: str + args: + description: + - A list of arguments that will be presented to the step when it is + started. + - If the image used to run the step's container has an entrypoint, the + args are used as arguments to that entrypoint. If the image does not + define an entrypoint, the first element in args is used as the entrypoint, + and the remainder will be used as arguments. + returned: success + type: list + env: + description: + - A list of environment variable definitions to be used when running + a step. + - The elements are of the form "KEY=VALUE" for the environment variable + "KEY" being given the value "VALUE". 
+ returned: success + type: list + id: + description: + - Unique identifier for this build step, used in `wait_for` to reference + this build step as a dependency. + returned: success + type: str + entrypoint: + description: + - Entrypoint to be used instead of the build step image's default entrypoint. + - If unset, the image's default entrypoint is used . + returned: success + type: str + dir: + description: + - Working directory to use when running this step's container. + - If this value is a relative path, it is relative to the build's working + directory. If this value is absolute, it may be outside the build's + working directory, in which case the contents of the path may not + be persisted across build step executions, unless a `volume` for that + path is specified. + - If the build specifies a `RepoSource` with `dir` and a step with a + `dir`, which specifies an absolute path, the `RepoSource` `dir` is + ignored for the step's execution. + returned: success + type: str + secretEnv: + description: + - A list of environment variables which are encrypted using a Cloud + Key Management Service crypto key. These values must be specified + in the build's `Secret`. + returned: success + type: list + timeout: + description: + - Time limit for executing this build step. If not defined, the step + has no time limit and will be allowed to continue to run until either + it completes or the build itself times out. + returned: success + type: str + timing: + description: + - Output only. Stores timing information for executing this build step. + returned: success + type: str + volumes: + description: + - List of volumes to mount into the build step. + - Each volume is created as an empty volume prior to execution of the + build step. Upon completion of the build, volumes and their contents + are discarded. + - Using a named volume in only one step is not valid as it is indicative + of a build request with an incorrect configuration. 
+ returned: success + type: complex + contains: + name: + description: + - Name of the volume to mount. + - Volume names must be unique per build step and must be valid names + for Docker volumes. Each named volume must be used by at least + two build steps. + returned: success + type: str + path: + description: + - Path at which to mount the volume. + - Paths must be absolute and cannot conflict with other volume paths + on the same build step or with certain reserved volume paths. + returned: success + type: str + waitFor: + description: + - The ID(s) of the step(s) that this build step depends on. + - This build step will not start until all the build steps in `wait_for` + have completed successfully. If `wait_for` is empty, this build step + will start when all previous build steps in the `Build.Steps` list + have completed successfully. + returned: success + type: list + artifacts: + description: + - Artifacts produced by the build that should be uploaded upon successful + completion of all build steps. + returned: success + type: complex + contains: + images: + description: + - A list of images to be pushed upon the successful completion of all + build steps. + - The images will be pushed using the builder service account's credentials. + - The digests of the pushed images will be stored in the Build resource's + results field. + - If any of the images fail to be pushed, the build is marked FAILURE. + returned: success + type: list + objects: + description: + - A list of objects to be uploaded to Cloud Storage upon successful + completion of all build steps. + - Files in the workspace matching specified paths globs will be uploaded + to the Cloud Storage location using the builder service account's + credentials. + - The location and generation of the uploaded objects will be stored + in the Build resource's results field. + - If any objects fail to be pushed, the build is marked FAILURE. 
+ returned: success + type: complex + contains: + location: + description: + - Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". + - Files in the workspace matching any path pattern will be uploaded + to Cloud Storage with this location as a prefix. + returned: success + type: str + paths: + description: + - Path globs used to match files in the build's workspace. + returned: success + type: list + timing: + description: + - Output only. Stores timing information for pushing all artifact + objects. + returned: success + type: complex + contains: + startTime: + description: + - Start of time span. + - 'A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" + and "2014-10-02T15:01:23.045123456Z".' + returned: success + type: str + endTime: + description: + - End of time span. + - 'A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" + and "2014-10-02T15:01:23.045123456Z".' + returned: success + type: str + options: + description: + - Special options for this build. + returned: success + type: complex + contains: + sourceProvenanceHash: + description: + - Requested hash for SourceProvenance. + returned: success + type: list + requestedVerifyOption: + description: + - Requested verifiability options. + returned: success + type: str + machineType: + description: + - Compute Engine machine type on which to run the build. + returned: success + type: str + diskSizeGb: + description: + - Requested disk size for the VM that runs the build. Note that this + is NOT "disk free"; some of the space will be used by the operating + system and build utilities. + - Also note that this is the minimum disk size that will be allocated + for the build -- the build may run with a larger disk than requested. 
+ At present, the maximum disk size is 1000GB; builds that request more + than the maximum are rejected with an error. + returned: success + type: int + substitutionOption: + description: + - Option to specify behavior when there is an error in the substitution + checks. + - NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot + be overridden in the build configuration file. + returned: success + type: str + dynamicSubstitutions: + description: + - Option to specify whether or not to apply bash style string operations + to the substitutions. + - NOTE this is always enabled for triggered builds and cannot be overridden + in the build configuration file. + returned: success + type: bool + logStreamingOption: + description: + - Option to define build log streaming behavior to Google Cloud Storage. + returned: success + type: str + workerPool: + description: + - Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} + This field is experimental. + returned: success + type: str + logging: + description: + - Option to specify the logging mode, which determines if and where + build logs are stored. + returned: success + type: str + env: + description: + - A list of global environment variable definitions that will exist + for all build steps in this build. If a variable is defined in both + globally and in a build step, the variable will use the build step + value. + - The elements are of the form "KEY=VALUE" for the environment variable + "KEY" being given the value "VALUE". + returned: success + type: list + secretEnv: + description: + - A list of global environment variables, which are encrypted using + a Cloud Key Management Service crypto key. These values must be specified + in the build's Secret. These variables will be available to all build + steps in this build. 
+ returned: success + type: list + volumes: + description: + - Global list of volumes to mount for ALL build steps Each volume is + created as an empty volume prior to starting the build process. + - Upon completion of the build, volumes and their contents are discarded. + Global volume names and paths cannot conflict with the volumes defined + a build step. + - Using a global volume in a build with only one step is not valid as + it is indicative of a build request with an incorrect configuration. + returned: success + type: complex + contains: + name: + description: + - Name of the volume to mount. + - Volume names must be unique per build step and must be valid names + for Docker volumes. + - Each named volume must be used by at least two build steps. + returned: success + type: str + path: + description: + - Path at which to mount the volume. + - Paths must be absolute and cannot conflict with other volume paths + on the same build step or with certain reserved volume paths. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://cloudbuild.googleapis.com/v1/projects/{project}/triggers".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 
'cloudbuild') + return auth.list(link, return_if_object, array_name='triggers') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_cloudfunctions_cloud_function.py b/ansible_collections/google/cloud/plugins/modules/gcp_cloudfunctions_cloud_function.py new file mode 100644 index 000000000..0b3c38ce1 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_cloudfunctions_cloud_function.py @@ -0,0 +1,741 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + +DOCUMENTATION = """ +--- +module: gcp_cloudfunctions_cloud_function +description: +- A Cloud Function that contains user computation executed in response to an event. +short_description: Creates a GCP CloudFunction +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - A user-defined name of the function. Function names must be unique globally + and match pattern `projects/*/locations/*/functions/*`. + required: true + type: str + description: + description: + - User-provided description of a function. + required: false + type: str + entry_point: + description: + - The name of the function (as defined in source code) that will be executed. + - Defaults to the resource name suffix, if not specified. For backward compatibility, + if function with given name is not found, then the system will try to use function + named "function". For Node.js this is name of a function exported by the module + specified in source_location. + required: false + type: str + runtime: + description: + - The runtime in which to run the function. Required when deploying a new function, + optional when updating an existing function. 
+ required: false + type: str + timeout: + description: + - The function execution timeout. Execution is considered failed and can be terminated + if the function is not completed at the end of the timeout period. Defaults + to 60 seconds. + required: false + type: str + available_memory_mb: + description: + - The amount of memory in MB available for a function. + required: false + type: int + labels: + description: + - A set of key/value label pairs associated with this Cloud Function. + required: false + type: dict + environment_variables: + description: + - Environment variables that shall be available during function execution. + required: false + type: dict + source_archive_url: + description: + - The Google Cloud Storage URL, starting with gs://, pointing to the zip archive + which contains the function. + required: false + type: str + source_upload_url: + description: + - The Google Cloud Storage signed URL used for source uploading. + required: false + type: str + source_repository: + description: + - The source repository where a function is hosted. + required: false + type: dict + suboptions: + url: + description: + - The URL pointing to the hosted repository where the function is defined + . + required: true + type: str + https_trigger: + description: + - An HTTPS endpoint type of source that can be triggered via URL. + required: false + type: dict + suboptions: {} + event_trigger: + description: + - An HTTPS endpoint type of source that can be triggered via URL. + required: false + type: dict + suboptions: + event_type: + description: + - 'The type of event to observe. For example: `providers/cloud.storage/eventTypes/object.change` + and `providers/cloud.pubsub/eventTypes/topic.publish`.' + required: true + type: str + resource: + description: + - The resource(s) from which to observe events, for example, `projects/_/buckets/myBucket.` + . + required: true + type: str + service: + description: + - The hostname of the service that should be observed. 
+ required: false + type: str + location: + description: + - The location of this cloud function. + required: true + type: str + trigger_http: + description: + - Use HTTP to trigger this function. + required: false + type: bool + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +""" + +EXAMPLES = """ +- name: create a cloud function + google.cloud.gcp_cloudfunctions_cloud_function: + name: test_object + location: us-central1 + entry_point: helloGET + source_archive_url: gs://ansible-cloudfunctions-bucket/function.zip + trigger_http: 'true' + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +""" + +RETURN = """ +name: + description: + - A user-defined name of the function. Function names must be unique globally and + match pattern `projects/*/locations/*/functions/*`. + returned: success + type: str +description: + description: + - User-provided description of a function. 
+ returned: success + type: str +status: + description: + - Status of the function deployment. + returned: success + type: str +entryPoint: + description: + - The name of the function (as defined in source code) that will be executed. + - Defaults to the resource name suffix, if not specified. For backward compatibility, + if function with given name is not found, then the system will try to use function + named "function". For Node.js this is name of a function exported by the module + specified in source_location. + returned: success + type: str +runtime: + description: + - The runtime in which the function is going to run. If empty, defaults to Node.js + 6. + returned: success + type: str +timeout: + description: + - The function execution timeout. Execution is considered failed and can be terminated + if the function is not completed at the end of the timeout period. Defaults to + 60 seconds. + returned: success + type: str +availableMemoryMb: + description: + - The amount of memory in MB available for a function. + returned: success + type: int +serviceAccountEmail: + description: + - The email of the service account for this function. + returned: success + type: str +updateTime: + description: + - The last update timestamp of a Cloud Function. + returned: success + type: str +versionId: + description: + - The version identifier of the Cloud Function. Each deployment attempt results + in a new version of a function being created. + returned: success + type: str +labels: + description: + - A set of key/value label pairs associated with this Cloud Function. + returned: success + type: dict +environmentVariables: + description: + - Environment variables that shall be available during function execution. + returned: success + type: dict +sourceArchiveUrl: + description: + - The Google Cloud Storage URL, starting with gs://, pointing to the zip archive + which contains the function. 
+ returned: success + type: str +sourceUploadUrl: + description: + - The Google Cloud Storage signed URL used for source uploading. + returned: success + type: str +sourceRepository: + description: + - The source repository where a function is hosted. + returned: success + type: complex + contains: + url: + description: + - The URL pointing to the hosted repository where the function is defined . + returned: success + type: str + deployedUrl: + description: + - The URL pointing to the hosted repository where the function were defined + at the time of deployment. + returned: success + type: str +httpsTrigger: + description: + - An HTTPS endpoint type of source that can be triggered via URL. + returned: success + type: complex + contains: + url: + description: + - The deployed url for the function. + returned: success + type: str +eventTrigger: + description: + - An HTTPS endpoint type of source that can be triggered via URL. + returned: success + type: complex + contains: + eventType: + description: + - 'The type of event to observe. For example: `providers/cloud.storage/eventTypes/object.change` + and `providers/cloud.pubsub/eventTypes/topic.publish`.' + returned: success + type: str + resource: + description: + - The resource(s) from which to observe events, for example, `projects/_/buckets/myBucket.` + . + returned: success + type: str + service: + description: + - The hostname of the service that should be observed. + returned: success + type: str +location: + description: + - The location of this cloud function. + returned: success + type: str +trigger_http: + description: + - Use HTTP to trigger this function. 
+ returned: success + type: bool +""" + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "absent"], type="str"), + name=dict(required=True, type="str"), + description=dict(type="str"), + entry_point=dict(type="str"), + runtime=dict(type="str"), + timeout=dict(type="str"), + available_memory_mb=dict(type="int"), + labels=dict(type="dict"), + environment_variables=dict(type="dict"), + source_archive_url=dict(type="str"), + source_upload_url=dict(type="str"), + source_repository=dict( + type="dict", options=dict(url=dict(required=True, type="str")) + ), + https_trigger=dict(type="dict", options=dict()), + event_trigger=dict( + type="dict", + options=dict( + event_type=dict(required=True, type="str"), + resource=dict(required=True, type="str"), + service=dict(type="str"), + ), + ), + location=dict(required=True, type="str"), + trigger_http=dict(type="bool"), + ) + ) + + if not module.params["scopes"]: + module.params["scopes"] = ["https://www.googleapis.com/auth/cloud-platform"] + + state = module.params["state"] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + # Need to set triggerHttps to {} if boolean true. 
+ if fetch and fetch.get("httpsTrigger") and module.params["trigger_http"]: + module.params["https_trigger"] = fetch.get("httpsTrigger") + elif module.params["trigger_http"]: + module.params["https_trigger"] = {} + + if fetch: + if state == "present": + if is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == "present": + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({"changed": changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, "cloudfunctions") + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, fetch): + auth = GcpSession(module, "cloudfunctions") + params = { + "updateMask": updateMask( + resource_to_request(module), response_to_hash(module, fetch) + ) + } + request = resource_to_request(module) + del request["name"] + return wait_for_operation(module, auth.put(link, request, params=params)) + + +def updateMask(request, response): + update_mask = [] + if request.get("name") != response.get("name"): + update_mask.append("name") + if request.get("description") != response.get("description"): + update_mask.append("description") + if request.get("entryPoint") != response.get("entryPoint"): + update_mask.append("entryPoint") + if request.get("runtime") != response.get("runtime"): + update_mask.append("runtime") + if request.get("timeout") != response.get("timeout"): + update_mask.append("timeout") + if request.get("availableMemoryMb") != response.get("availableMemoryMb"): + update_mask.append("availableMemoryMb") + if request.get("labels") != response.get("labels"): + update_mask.append("labels") + if request.get("environmentVariables") != response.get("environmentVariables"): + update_mask.append("environmentVariables") + if 
request.get("sourceArchiveUrl") != response.get("sourceArchiveUrl"): + update_mask.append("sourceArchiveUrl") + if request.get("sourceUploadUrl") != response.get("sourceUploadUrl"): + update_mask.append("sourceUploadUrl") + if request.get("sourceRepository") != response.get("sourceRepository"): + update_mask.append("sourceRepository") + if request.get("httpsTrigger") != response.get("httpsTrigger"): + update_mask.append("httpsTrigger") + if request.get("eventTrigger") != response.get("eventTrigger"): + update_mask.append("eventTrigger") + if request.get("location") != response.get("location"): + update_mask.append("location") + if request.get("trigger_http") != response.get("trigger_http"): + update_mask.append("trigger_http") + return ",".join(update_mask) + + +def delete(module, link): + auth = GcpSession(module, "cloudfunctions") + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + "name": name_pattern(module.params.get("name"), module), + "description": module.params.get("description"), + "entryPoint": module.params.get("entry_point"), + "runtime": module.params.get("runtime"), + "timeout": module.params.get("timeout"), + "availableMemoryMb": module.params.get("available_memory_mb"), + "labels": module.params.get("labels"), + "environmentVariables": module.params.get("environment_variables"), + "sourceArchiveUrl": module.params.get("source_archive_url"), + "sourceUploadUrl": module.params.get("source_upload_url"), + "sourceRepository": CloudFunctionSourcerepository( + module.params.get("source_repository", {}), module + ).to_request(), + "httpsTrigger": CloudFunctionHttpstrigger( + module.params.get("https_trigger", {}), module + ).to_request(), + "eventTrigger": CloudFunctionEventtrigger( + module.params.get("event_trigger", {}), module + ).to_request(), + } + request = encode_request(request, module) + return request + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 
"cloudfunctions") + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://cloudfunctions.googleapis.com/v1/projects/{project}/locations/{location}/functions/{name}".format( + **module.params + ) + + +def collection(module): + return "https://cloudfunctions.googleapis.com/v1/projects/{project}/locations/{location}/functions".format( + **module.params + ) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, "JSONDecodeError", ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ["error", "errors"]): + module.fail_json(msg=navigate_hash(result, ["error", "errors"])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
def response_to_hash(module, response):
    """Project the API *response* down to the fields this module manages."""
    return {
        "name": response.get("name"),
        "description": response.get("description"),
        "status": response.get("status"),
        "entryPoint": response.get("entryPoint"),
        "runtime": response.get("runtime"),
        "timeout": response.get("timeout"),
        "availableMemoryMb": response.get("availableMemoryMb"),
        "serviceAccountEmail": response.get("serviceAccountEmail"),
        "updateTime": response.get("updateTime"),
        "versionId": response.get("versionId"),
        "labels": response.get("labels"),
        "environmentVariables": response.get("environmentVariables"),
        "sourceArchiveUrl": response.get("sourceArchiveUrl"),
        "sourceUploadUrl": response.get("sourceUploadUrl"),
        "sourceRepository": CloudFunctionSourcerepository(
            response.get("sourceRepository", {}), module
        ).from_response(),
        "httpsTrigger": CloudFunctionHttpstrigger(
            response.get("httpsTrigger", {}), module
        ).from_response(),
        "eventTrigger": CloudFunctionEventtrigger(
            response.get("eventTrigger", {}), module
        ).from_response(),
    }


def name_pattern(name, module):
    """Expand a bare function name into its full resource path.

    A name already matching projects/*/locations/*/functions/* (or None)
    passes through unchanged; anything else is rebuilt from module params.
    """
    if name is None:
        return None

    if re.match(r"projects/.*/locations/.*/functions/.*", name):
        return name

    return "projects/{project}/locations/{location}/functions/{name}".format(
        **module.params
    )


def async_op_url(module, extra_data=None):
    """Build the operation-polling URL; module params override *extra_data*."""
    combined = dict(extra_data or {})
    combined.update(module.params)
    return "https://cloudfunctions.googleapis.com/v1/{op_id}".format(**combined)


def wait_for_operation(module, response):
    """Decode an operation *response* and poll it until done.

    Returns {} for empty responses, otherwise the operation's response field.
    """
    op_result = return_if_object(module, response)
    if op_result is None:
        return {}
    done = navigate_hash(op_result, ["done"])
    finished = wait_for_completion(done, op_result, module)
    raise_if_errors(finished, ["error"], module)
    return navigate_hash(finished, ["response"])


def wait_for_completion(status, op_result, module):
    """Poll the long-running operation once per second until its done flag is set."""
    op_uri = async_op_url(module, {"op_id": navigate_hash(op_result, ["name"])})
    while not status:
        raise_if_errors(op_result, ["error"], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, False)
        status = navigate_hash(op_result, ["done"])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module if *response* carries an error at *err_path*."""
    found = navigate_hash(response, err_path)
    if found is not None:
        module.fail_json(msg=found)


def encode_request(request, module):
    """Drop unset values (keeping explicit False); force httpsTrigger for HTTP triggers."""
    encoded = {k: v for k, v in request.items() if v or v is False}

    if module.params["trigger_http"] and not encoded.get("httpsTrigger"):
        encoded["httpsTrigger"] = {}

    return encoded


class CloudFunctionSourcerepository(object):
    """(De)serializer for the sourceRepository sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({"url": self.request.get("url")})

    def from_response(self):
        return remove_nones_from_dict({"url": self.request.get("url")})


class CloudFunctionHttpstrigger(object):
    """(De)serializer for the httpsTrigger sub-object (no settable fields)."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({})

    def from_response(self):
        return remove_nones_from_dict({})


class CloudFunctionEventtrigger(object):
    """(De)serializer for the eventTrigger sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                "eventType": self.request.get("event_type"),
                "resource": self.request.get("resource"),
                "service": self.request.get("service"),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                "eventType": self.request.get("eventType"),
                "resource": self.request.get("resource"),
                "service": self.request.get("service"),
            }
        )


if __name__ == "__main__":
    main()
a/ansible_collections/google/cloud/plugins/modules/gcp_cloudfunctions_cloud_function_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_cloudfunctions_cloud_function_info.py new file mode 100644 index 000000000..36fc75308 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_cloudfunctions_cloud_function_info.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_cloudfunctions_cloud_function_info +description: +- Gather info for GCP CloudFunction +short_description: Gather info for GCP CloudFunction +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + location: + description: + - The location of this cloud function. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. 
+ type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a cloud function + gcp_cloudfunctions_cloud_function_info: + location: us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - A user-defined name of the function. Function names must be unique globally + and match pattern `projects/*/locations/*/functions/*`. + returned: success + type: str + description: + description: + - User-provided description of a function. + returned: success + type: str + status: + description: + - Status of the function deployment. + returned: success + type: str + entryPoint: + description: + - The name of the function (as defined in source code) that will be executed. + - Defaults to the resource name suffix, if not specified. For backward compatibility, + if function with given name is not found, then the system will try to use + function named "function". For Node.js this is name of a function exported + by the module specified in source_location. + returned: success + type: str + runtime: + description: + - The runtime in which the function is going to run. If empty, defaults to Node.js + 6. + returned: success + type: str + timeout: + description: + - The function execution timeout. Execution is considered failed and can be + terminated if the function is not completed at the end of the timeout period. + Defaults to 60 seconds. + returned: success + type: str + availableMemoryMb: + description: + - The amount of memory in MB available for a function. + returned: success + type: int + serviceAccountEmail: + description: + - The email of the service account for this function. + returned: success + type: str + updateTime: + description: + - The last update timestamp of a Cloud Function. + returned: success + type: str + versionId: + description: + - The version identifier of the Cloud Function. 
Each deployment attempt results + in a new version of a function being created. + returned: success + type: str + labels: + description: + - A set of key/value label pairs associated with this Cloud Function. + returned: success + type: dict + environmentVariables: + description: + - Environment variables that shall be available during function execution. + returned: success + type: dict + sourceArchiveUrl: + description: + - The Google Cloud Storage URL, starting with gs://, pointing to the zip archive + which contains the function. + returned: success + type: str + sourceUploadUrl: + description: + - The Google Cloud Storage signed URL used for source uploading. + returned: success + type: str + sourceRepository: + description: + - The source repository where a function is hosted. + returned: success + type: complex + contains: + url: + description: + - The URL pointing to the hosted repository where the function is defined + . + returned: success + type: str + deployedUrl: + description: + - The URL pointing to the hosted repository where the function were defined + at the time of deployment. + returned: success + type: str + httpsTrigger: + description: + - An HTTPS endpoint type of source that can be triggered via URL. + returned: success + type: complex + contains: + url: + description: + - The deployed url for the function. + returned: success + type: str + eventTrigger: + description: + - An HTTPS endpoint type of source that can be triggered via URL. + returned: success + type: complex + contains: + eventType: + description: + - 'The type of event to observe. For example: `providers/cloud.storage/eventTypes/object.change` + and `providers/cloud.pubsub/eventTypes/topic.publish`.' + returned: success + type: str + resource: + description: + - The resource(s) from which to observe events, for example, `projects/_/buckets/myBucket.` + . + returned: success + type: str + service: + description: + - The hostname of the service that should be observed. 
def main():
    """Entry point: list Cloud Functions in the given project/location."""
    module = GcpModule(argument_spec=dict(location=dict(required=True, type='str')))

    # Default to the broad cloud-platform scope when none was supplied.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    module.exit_json(resources=fetch_list(module, collection(module)))


def collection(module):
    """URL of the functions collection for the configured project/location."""
    return "https://cloudfunctions.googleapis.com/v1/projects/{project}/locations/{location}/functions".format(**module.params)


def fetch_list(module, link):
    """Return every function object from the paginated 'functions' array."""
    session = GcpSession(module, 'cloudfunctions')
    return session.list(link, return_if_object, array_name='functions')


def return_if_object(module, response):
    """Decode an HTTP response into a dict; 404 and 204 both yield None."""
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return result


if __name__ == "__main__":
    main()
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_cloudscheduler_job +description: +- A scheduled job that can publish a pubsub message or a http request every X interval + of time, using crontab format string. +- To use Cloud Scheduler your project must contain an App Engine app that is located + in one of the supported regions. If your project does not have an App Engine app, + you must create one. +short_description: Creates a GCP Job +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The name of the job. + required: true + type: str + description: + description: + - A human-readable description for the job. This string must not contain more + than 500 characters. + required: false + type: str + schedule: + description: + - Describes the schedule on which the job will be executed. + required: false + type: str + time_zone: + description: + - Specifies the time zone to be used in interpreting schedule. + - The value of this field must be a time zone name from the tz database. + required: false + default: Etc/UTC + type: str + attempt_deadline: + description: + - The deadline for job attempts. 
If the request handler does not respond by this + deadline then the request is cancelled and the attempt is marked as a DEADLINE_EXCEEDED + failure. The failed attempt can be viewed in execution logs. Cloud Scheduler + will retry the job according to the RetryConfig. + - 'The allowed duration for this deadline is: * For HTTP targets, between 15 seconds + and 30 minutes.' + - "* For App Engine HTTP targets, between 15 seconds and 24 hours." + - "* **Note**: For PubSub targets, this field is ignored - setting it will introduce + an unresolvable diff." + - 'A duration in seconds with up to nine fractional digits, terminated by ''s''. + Example: "3.5s" .' + required: false + default: 180s + type: str + retry_config: + description: + - By default, if a job does not complete successfully, meaning that an acknowledgement + is not received from the handler, then it will be retried with exponential backoff + according to the settings . + required: false + type: dict + suboptions: + retry_count: + description: + - The number of attempts that the system will make to run a job using the + exponential backoff procedure described by maxDoublings. + - Values greater than 5 and negative values are not allowed. + required: false + type: int + max_retry_duration: + description: + - The time limit for retrying a failed job, measured from time when an execution + was first attempted. If specified with retryCount, the job will be retried + until both limits are reached. + - A duration in seconds with up to nine fractional digits, terminated by 's'. + required: false + type: str + min_backoff_duration: + description: + - The minimum amount of time to wait before retrying a job after it fails. + - A duration in seconds with up to nine fractional digits, terminated by 's'. + required: false + type: str + max_backoff_duration: + description: + - The maximum amount of time to wait before retrying a job after it fails. 
+ - A duration in seconds with up to nine fractional digits, terminated by 's'. + required: false + type: str + max_doublings: + description: + - The time between retries will double maxDoublings times. + - A job's retry interval starts at minBackoffDuration, then doubles maxDoublings + times, then increases linearly, and finally retries retries at intervals + of maxBackoffDuration up to retryCount times. + required: false + type: int + pubsub_target: + description: + - Pub/Sub target If the job providers a Pub/Sub target the cron will publish a + message to the provided topic . + required: false + type: dict + suboptions: + topic_name: + description: + - The full resource name for the Cloud Pub/Sub topic to which messages will + be published when a job is delivered. ~>**NOTE:** The topic name must be + in the same format as required by PubSub's PublishRequest.name, e.g. `projects/my-project/topics/my-topic`. + required: true + type: str + data: + description: + - The message payload for PubsubMessage. + - Pubsub message must contain either non-empty data, or at least one attribute. + - A base64-encoded string. + required: false + type: str + attributes: + description: + - Attributes for PubsubMessage. + - Pubsub message must contain either non-empty data, or at least one attribute. + required: false + type: dict + app_engine_http_target: + description: + - App Engine HTTP target. + - If the job providers a App Engine HTTP target the cron will send a request to + the service instance . + required: false + type: dict + suboptions: + http_method: + description: + - Which HTTP method to use for the request. + required: false + type: str + app_engine_routing: + description: + - App Engine Routing setting for the job. + required: false + type: dict + suboptions: + service: + description: + - App service. + - By default, the job is sent to the service which is the default service + when the job is attempted. 
+ required: false + type: str + version: + description: + - App version. + - By default, the job is sent to the version which is the default version + when the job is attempted. + required: false + type: str + instance: + description: + - App instance. + - By default, the job is sent to an instance which is available when the + job is attempted. + required: false + type: str + relative_uri: + description: + - The relative URI. + required: true + type: str + body: + description: + - HTTP request body. A request body is allowed only if the HTTP method is + POST or PUT. It will result in invalid argument error to set a body on a + job with an incompatible HttpMethod. + - A base64-encoded string. + required: false + type: str + headers: + description: + - HTTP request headers. + - This map contains the header field names and values. Headers can be set + when the job is created. + required: false + type: dict + http_target: + description: + - HTTP target. + - If the job providers a http_target the cron will send a request to the targeted + url . + required: false + type: dict + suboptions: + uri: + description: + - The full URI path that the request will be sent to. + required: true + type: str + http_method: + description: + - Which HTTP method to use for the request. + required: false + type: str + body: + description: + - HTTP request body. A request body is allowed only if the HTTP method is + POST, PUT, or PATCH. It is an error to set body on a job with an incompatible + HttpMethod. + - A base64-encoded string. + required: false + type: str + headers: + description: + - This map contains the header field names and values. Repeated headers are + not supported, but a header value can contain commas. + required: false + type: dict + oauth_token: + description: + - Contains information needed for generating an OAuth token. + - This type of authorization should be used when sending requests to a GCP + endpoint. 
+ required: false + type: dict + suboptions: + service_account_email: + description: + - Service account email to be used for generating OAuth token. + - The service account must be within the same project as the job. + required: true + type: str + scope: + description: + - OAuth scope to be used for generating OAuth access token. If not specified, + "U(https://www.googleapis.com/auth/cloud-platform") will be used. + required: false + type: str + oidc_token: + description: + - Contains information needed for generating an OpenID Connect token. + - This type of authorization should be used when sending requests to third + party endpoints or Cloud Run. + required: false + type: dict + suboptions: + service_account_email: + description: + - Service account email to be used for generating OAuth token. + - The service account must be within the same project as the job. + required: true + type: str + audience: + description: + - Audience to be used when generating OIDC token. If not specified, the + URI specified in target will be used. + required: false + type: str + region: + description: + - Region where the scheduler job resides . + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/scheduler/docs/reference/rest/)' +- 'Official Documentation: U(https://cloud.google.com/scheduler/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a job + google.cloud.gcp_cloudscheduler_job: + name: job + region: us-central1 + schedule: "*/4 * * * *" + description: test app engine job + time_zone: Europe/London + attempt_deadline: 320s + app_engine_http_target: + http_method: POST + app_engine_routing: + service: web + version: prod + instance: my-instance-001 + relative_uri: "/ping" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The name of the job. + returned: success + type: str +description: + description: + - A human-readable description for the job. This string must not contain more than + 500 characters. 
+ returned: success + type: str +schedule: + description: + - Describes the schedule on which the job will be executed. + returned: success + type: str +timeZone: + description: + - Specifies the time zone to be used in interpreting schedule. + - The value of this field must be a time zone name from the tz database. + returned: success + type: str +attemptDeadline: + description: + - The deadline for job attempts. If the request handler does not respond by this + deadline then the request is cancelled and the attempt is marked as a DEADLINE_EXCEEDED + failure. The failed attempt can be viewed in execution logs. Cloud Scheduler will + retry the job according to the RetryConfig. + - 'The allowed duration for this deadline is: * For HTTP targets, between 15 seconds + and 30 minutes.' + - "* For App Engine HTTP targets, between 15 seconds and 24 hours." + - "* **Note**: For PubSub targets, this field is ignored - setting it will introduce + an unresolvable diff." + - 'A duration in seconds with up to nine fractional digits, terminated by ''s''. + Example: "3.5s" .' + returned: success + type: str +retryConfig: + description: + - By default, if a job does not complete successfully, meaning that an acknowledgement + is not received from the handler, then it will be retried with exponential backoff + according to the settings . + returned: success + type: complex + contains: + retryCount: + description: + - The number of attempts that the system will make to run a job using the exponential + backoff procedure described by maxDoublings. + - Values greater than 5 and negative values are not allowed. + returned: success + type: int + maxRetryDuration: + description: + - The time limit for retrying a failed job, measured from time when an execution + was first attempted. If specified with retryCount, the job will be retried + until both limits are reached. + - A duration in seconds with up to nine fractional digits, terminated by 's'. 
+ returned: success + type: str + minBackoffDuration: + description: + - The minimum amount of time to wait before retrying a job after it fails. + - A duration in seconds with up to nine fractional digits, terminated by 's'. + returned: success + type: str + maxBackoffDuration: + description: + - The maximum amount of time to wait before retrying a job after it fails. + - A duration in seconds with up to nine fractional digits, terminated by 's'. + returned: success + type: str + maxDoublings: + description: + - The time between retries will double maxDoublings times. + - A job's retry interval starts at minBackoffDuration, then doubles maxDoublings + times, then increases linearly, and finally retries retries at intervals of + maxBackoffDuration up to retryCount times. + returned: success + type: int +pubsubTarget: + description: + - Pub/Sub target If the job providers a Pub/Sub target the cron will publish a message + to the provided topic . + returned: success + type: complex + contains: + topicName: + description: + - The full resource name for the Cloud Pub/Sub topic to which messages will + be published when a job is delivered. ~>**NOTE:** The topic name must be in + the same format as required by PubSub's PublishRequest.name, e.g. `projects/my-project/topics/my-topic`. + returned: success + type: str + data: + description: + - The message payload for PubsubMessage. + - Pubsub message must contain either non-empty data, or at least one attribute. + - A base64-encoded string. + returned: success + type: str + attributes: + description: + - Attributes for PubsubMessage. + - Pubsub message must contain either non-empty data, or at least one attribute. + returned: success + type: dict +appEngineHttpTarget: + description: + - App Engine HTTP target. + - If the job providers a App Engine HTTP target the cron will send a request to + the service instance . 
+ returned: success + type: complex + contains: + httpMethod: + description: + - Which HTTP method to use for the request. + returned: success + type: str + appEngineRouting: + description: + - App Engine Routing setting for the job. + returned: success + type: complex + contains: + service: + description: + - App service. + - By default, the job is sent to the service which is the default service + when the job is attempted. + returned: success + type: str + version: + description: + - App version. + - By default, the job is sent to the version which is the default version + when the job is attempted. + returned: success + type: str + instance: + description: + - App instance. + - By default, the job is sent to an instance which is available when the + job is attempted. + returned: success + type: str + relativeUri: + description: + - The relative URI. + returned: success + type: str + body: + description: + - HTTP request body. A request body is allowed only if the HTTP method is POST + or PUT. It will result in invalid argument error to set a body on a job with + an incompatible HttpMethod. + - A base64-encoded string. + returned: success + type: str + headers: + description: + - HTTP request headers. + - This map contains the header field names and values. Headers can be set when + the job is created. + returned: success + type: dict +httpTarget: + description: + - HTTP target. + - If the job providers a http_target the cron will send a request to the targeted + url . + returned: success + type: complex + contains: + uri: + description: + - The full URI path that the request will be sent to. + returned: success + type: str + httpMethod: + description: + - Which HTTP method to use for the request. + returned: success + type: str + body: + description: + - HTTP request body. A request body is allowed only if the HTTP method is POST, + PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. + - A base64-encoded string. 
+ returned: success + type: str + headers: + description: + - This map contains the header field names and values. Repeated headers are + not supported, but a header value can contain commas. + returned: success + type: dict + oauthToken: + description: + - Contains information needed for generating an OAuth token. + - This type of authorization should be used when sending requests to a GCP endpoint. + returned: success + type: complex + contains: + serviceAccountEmail: + description: + - Service account email to be used for generating OAuth token. + - The service account must be within the same project as the job. + returned: success + type: str + scope: + description: + - OAuth scope to be used for generating OAuth access token. If not specified, + "U(https://www.googleapis.com/auth/cloud-platform") will be used. + returned: success + type: str + oidcToken: + description: + - Contains information needed for generating an OpenID Connect token. + - This type of authorization should be used when sending requests to third party + endpoints or Cloud Run. + returned: success + type: complex + contains: + serviceAccountEmail: + description: + - Service account email to be used for generating OAuth token. + - The service account must be within the same project as the job. + returned: success + type: str + audience: + description: + - Audience to be used when generating OIDC token. If not specified, the + URI specified in target will be used. + returned: success + type: str +region: + description: + - Region where the scheduler job resides . 
def main():
    """Entry point: converge a Cloud Scheduler job to the requested state."""

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            description=dict(type='str'),
            schedule=dict(type='str'),
            time_zone=dict(default='Etc/UTC', type='str'),
            attempt_deadline=dict(default='180s', type='str'),
            retry_config=dict(
                type='dict',
                options=dict(
                    retry_count=dict(type='int'),
                    max_retry_duration=dict(type='str'),
                    min_backoff_duration=dict(type='str'),
                    max_backoff_duration=dict(type='str'),
                    max_doublings=dict(type='int'),
                ),
            ),
            pubsub_target=dict(
                type='dict',
                options=dict(
                    topic_name=dict(required=True, type='str'),
                    data=dict(type='str'),
                    attributes=dict(type='dict'),
                ),
            ),
            app_engine_http_target=dict(
                type='dict',
                options=dict(
                    http_method=dict(type='str'),
                    app_engine_routing=dict(
                        type='dict',
                        options=dict(
                            service=dict(type='str'),
                            version=dict(type='str'),
                            instance=dict(type='str'),
                        ),
                    ),
                    relative_uri=dict(required=True, type='str'),
                    body=dict(type='str'),
                    headers=dict(type='dict'),
                ),
            ),
            http_target=dict(
                type='dict',
                options=dict(
                    uri=dict(required=True, type='str'),
                    http_method=dict(type='str'),
                    body=dict(type='str'),
                    headers=dict(type='dict'),
                    oauth_token=dict(
                        type='dict',
                        options=dict(
                            service_account_email=dict(required=True, type='str'),
                            scope=dict(type='str'),
                        ),
                    ),
                    oidc_token=dict(
                        type='dict',
                        options=dict(
                            service_account_email=dict(required=True, type='str'),
                            audience=dict(type='str'),
                        ),
                    ),
                ),
            ),
            region=dict(required=True, type='str'),
        )
    )

    # Default to the broad cloud-platform scope when none was supplied.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    state = module.params['state']
    link = self_link(module)

    fetch = fetch_resource(module, link)
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, link)
                fetch = fetch_resource(module, link)
                changed = True
        else:
            delete(module, link)
            fetch = {}
            changed = True
    elif state == 'present':
        fetch = create(module, collection(module))
        changed = True
    else:
        fetch = {}

    fetch.update({'changed': changed})
    module.exit_json(**fetch)


def create(module, link):
    """POST the desired job to the collection URL."""
    session = GcpSession(module, 'cloudscheduler')
    return return_if_object(module, session.post(link, resource_to_request(module)))


def update(module, link):
    """PATCH the existing job with the desired configuration."""
    session = GcpSession(module, 'cloudscheduler')
    return return_if_object(module, session.patch(link, resource_to_request(module)))


def delete(module, link):
    """DELETE the job at its self link."""
    session = GcpSession(module, 'cloudscheduler')
    return return_if_object(module, session.delete(link))


def resource_to_request(module):
    """Translate Ansible params into the API's camelCase request body.

    Empty values are stripped (False is kept) after the targets have been
    normalized by their helper classes and encode_request.
    """
    params = module.params
    request = {
        'name': params.get('name'),
        'description': params.get('description'),
        'schedule': params.get('schedule'),
        'timeZone': params.get('time_zone'),
        'attemptDeadline': params.get('attempt_deadline'),
        'retryConfig': JobRetryconfig(params.get('retry_config', {}), module).to_request(),
        'pubsubTarget': JobPubsubtarget(params.get('pubsub_target', {}), module).to_request(),
        'appEngineHttpTarget': JobAppenginehttptarget(params.get('app_engine_http_target', {}), module).to_request(),
        'httpTarget': JobHttptarget(params.get('http_target', {}), module).to_request(),
    }
    request = encode_request(request, module)
    return {k: v for k, v in request.items() if v or v is False}


def fetch_resource(module, link, allow_not_found=True):
    """GET *link* and decode it; by default a 404 is treated as "absent"."""
    session = GcpSession(module, 'cloudscheduler')
    return return_if_object(module, session.get(link), allow_not_found)


def self_link(module):
    """URL of the single job named by module params (project/region/name)."""
    return "https://cloudscheduler.googleapis.com/v1/projects/{project}/locations/{region}/jobs/{name}".format(**module.params)


def collection(module):
    """URL of the jobs collection for the configured project/region."""
    return "https://cloudscheduler.googleapis.com/v1/projects/{project}/locations/{region}/jobs".format(**module.params)


def return_if_object(module, response, allow_not_found=False):
    """Decode an HTTP response into a dict.

    Returns None for 204 (no content) and, when allow_not_found is set,
    for 404.  The decoded body is passed through decode_request to undo
    the request-side encoding before error inspection.
    """
    status = response.status_code
    if status == 404 and allow_not_found:
        return None
    if status == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    result = decode_request(result, module)

    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return result


def is_different(module, response):
    """True when the desired request differs from the live job.

    Only keys present on both sides are compared, which drops output-only
    response fields before the GcpRequest comparison.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    request = decode_request(request, module)

    response_vals = {k: v for k, v in response.items() if k in request}
    request_vals = {k: v for k, v in request.items() if k in response}

    return GcpRequest(request_vals) != GcpRequest(response_vals)
def response_to_hash(module, response):
    """Project the API response onto the set of fields Ansible manages.

    'name' is taken from the module params (the response holds the long
    resource name); nested targets are normalized via the Job* classes.
    """
    return {
        u'name': module.params.get('name'),
        u'description': response.get(u'description'),
        u'schedule': response.get(u'schedule'),
        u'timeZone': response.get(u'timeZone'),
        u'attemptDeadline': response.get(u'attemptDeadline'),
        u'retryConfig': JobRetryconfig(response.get(u'retryConfig', {}), module).from_response(),
        u'pubsubTarget': JobPubsubtarget(response.get(u'pubsubTarget', {}), module).from_response(),
        u'appEngineHttpTarget': JobAppenginehttptarget(response.get(u'appEngineHttpTarget', {}), module).from_response(),
        u'httpTarget': JobHttptarget(response.get(u'httpTarget', {}), module).from_response(),
    }


def encode_request(request, module):
    """Expand the short job name into projects/.../locations/.../jobs/... form."""
    request['name'] = "projects/%s/locations/%s/jobs/%s" % (module.params['project'], module.params['region'], module.params['name'])
    return request


def decode_request(response, module):
    """Collapse a fully-qualified resource name back to its final segment."""
    if 'name' in response:
        response['name'] = response['name'].split('/')[-1]

    return response


class JobRetryconfig(object):
    """Maps the retry_config sub-option between Ansible and API field names."""

    def __init__(self, request, module):
        self.module = module
        # Tolerate None (option not supplied) by falling back to an empty dict.
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        # snake_case module params -> camelCase API fields, Nones dropped.
        return remove_nones_from_dict(
            {
                u'retryCount': self.request.get('retry_count'),
                u'maxRetryDuration': self.request.get('max_retry_duration'),
                u'minBackoffDuration': self.request.get('min_backoff_duration'),
                u'maxBackoffDuration': self.request.get('max_backoff_duration'),
                u'maxDoublings': self.request.get('max_doublings'),
            }
        )

    def from_response(self):
        # camelCase API fields pass through unchanged, Nones dropped.
        return remove_nones_from_dict(
            {
                u'retryCount': self.request.get(u'retryCount'),
                u'maxRetryDuration': self.request.get(u'maxRetryDuration'),
                u'minBackoffDuration': self.request.get(u'minBackoffDuration'),
                u'maxBackoffDuration': self.request.get(u'maxBackoffDuration'),
                u'maxDoublings': self.request.get(u'maxDoublings'),
            }
        )


class JobPubsubtarget(object):
    """Maps the pubsub_target sub-option between Ansible and API field names."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {u'topicName': self.request.get('topic_name'), u'data': self.request.get('data'), u'attributes': self.request.get('attributes')}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {u'topicName': self.request.get(u'topicName'), u'data': self.request.get(u'data'), u'attributes': self.request.get(u'attributes')}
        )


class JobAppenginehttptarget(object):
    """Maps the app_engine_http_target sub-option, delegating its nested
    app_engine_routing dict to JobAppenginerouting."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'httpMethod': self.request.get('http_method'),
                u'appEngineRouting': JobAppenginerouting(self.request.get('app_engine_routing', {}), self.module).to_request(),
                u'relativeUri': self.request.get('relative_uri'),
                u'body': self.request.get('body'),
                u'headers': self.request.get('headers'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'httpMethod': self.request.get(u'httpMethod'),
                u'appEngineRouting': JobAppenginerouting(self.request.get(u'appEngineRouting', {}), self.module).from_response(),
                u'relativeUri': self.request.get(u'relativeUri'),
                u'body': self.request.get(u'body'),
                u'headers': self.request.get(u'headers'),
            }
        )


class JobAppenginerouting(object):
    """Maps the app_engine_routing sub-option; field names are identical in
    both directions, so both converters only strip Nones."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {u'service': self.request.get('service'), u'version': self.request.get('version'), u'instance': self.request.get('instance')}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {u'service': self.request.get(u'service'), u'version': self.request.get(u'version'), u'instance': self.request.get(u'instance')}
        )


class JobHttptarget(object):
    """Maps the http_target sub-option, delegating oauth_token/oidc_token to
    their dedicated converter classes."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'uri': self.request.get('uri'),
                u'httpMethod': self.request.get('http_method'),
                u'body': self.request.get('body'),
                u'headers': self.request.get('headers'),
                u'oauthToken': JobOauthtoken(self.request.get('oauth_token', {}), self.module).to_request(),
                u'oidcToken': JobOidctoken(self.request.get('oidc_token', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'uri': self.request.get(u'uri'),
                u'httpMethod': self.request.get(u'httpMethod'),
                u'body': self.request.get(u'body'),
                u'headers': self.request.get(u'headers'),
                u'oauthToken': JobOauthtoken(self.request.get(u'oauthToken', {}), self.module).from_response(),
                u'oidcToken': JobOidctoken(self.request.get(u'oidcToken', {}), self.module).from_response(),
            }
        )


class JobOauthtoken(object):
    """Maps the oauth_token sub-option (service account + OAuth scope)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'serviceAccountEmail': self.request.get('service_account_email'), u'scope': self.request.get('scope')})

    def from_response(self):
        return remove_nones_from_dict({u'serviceAccountEmail': self.request.get(u'serviceAccountEmail'), u'scope': self.request.get(u'scope')})


class JobOidctoken(object):
    """Maps the oidc_token sub-option (service account + OIDC audience)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'serviceAccountEmail': self.request.get('service_account_email'), u'audience': self.request.get('audience')})

    def from_response(self):
        return remove_nones_from_dict({u'serviceAccountEmail': self.request.get(u'serviceAccountEmail'), u'audience': self.request.get(u'audience')})


if __name__ == '__main__':
    main()
'__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_cloudscheduler_job_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_cloudscheduler_job_info.py new file mode 100644 index 000000000..4ab155ebb --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_cloudscheduler_job_info.py @@ -0,0 +1,415 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_cloudscheduler_job_info +description: +- Gather info for GCP Job +short_description: Gather info for GCP Job +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + region: + description: + - Region where the scheduler job resides . + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a job + gcp_cloudscheduler_job_info: + region: us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of the job. 
+ returned: success + type: str + description: + description: + - A human-readable description for the job. This string must not contain more + than 500 characters. + returned: success + type: str + schedule: + description: + - Describes the schedule on which the job will be executed. + returned: success + type: str + timeZone: + description: + - Specifies the time zone to be used in interpreting schedule. + - The value of this field must be a time zone name from the tz database. + returned: success + type: str + attemptDeadline: + description: + - The deadline for job attempts. If the request handler does not respond by + this deadline then the request is cancelled and the attempt is marked as a + DEADLINE_EXCEEDED failure. The failed attempt can be viewed in execution logs. + Cloud Scheduler will retry the job according to the RetryConfig. + - 'The allowed duration for this deadline is: * For HTTP targets, between 15 + seconds and 30 minutes.' + - "* For App Engine HTTP targets, between 15 seconds and 24 hours." + - "* **Note**: For PubSub targets, this field is ignored - setting it will introduce + an unresolvable diff." + - 'A duration in seconds with up to nine fractional digits, terminated by ''s''. + Example: "3.5s" .' + returned: success + type: str + retryConfig: + description: + - By default, if a job does not complete successfully, meaning that an acknowledgement + is not received from the handler, then it will be retried with exponential + backoff according to the settings . + returned: success + type: complex + contains: + retryCount: + description: + - The number of attempts that the system will make to run a job using the + exponential backoff procedure described by maxDoublings. + - Values greater than 5 and negative values are not allowed. + returned: success + type: int + maxRetryDuration: + description: + - The time limit for retrying a failed job, measured from time when an execution + was first attempted. 
If specified with retryCount, the job will be retried + until both limits are reached. + - A duration in seconds with up to nine fractional digits, terminated by + 's'. + returned: success + type: str + minBackoffDuration: + description: + - The minimum amount of time to wait before retrying a job after it fails. + - A duration in seconds with up to nine fractional digits, terminated by + 's'. + returned: success + type: str + maxBackoffDuration: + description: + - The maximum amount of time to wait before retrying a job after it fails. + - A duration in seconds with up to nine fractional digits, terminated by + 's'. + returned: success + type: str + maxDoublings: + description: + - The time between retries will double maxDoublings times. + - A job's retry interval starts at minBackoffDuration, then doubles maxDoublings + times, then increases linearly, and finally retries retries at intervals + of maxBackoffDuration up to retryCount times. + returned: success + type: int + pubsubTarget: + description: + - Pub/Sub target If the job providers a Pub/Sub target the cron will publish + a message to the provided topic . + returned: success + type: complex + contains: + topicName: + description: + - The full resource name for the Cloud Pub/Sub topic to which messages will + be published when a job is delivered. ~>**NOTE:** The topic name must + be in the same format as required by PubSub's PublishRequest.name, e.g. + `projects/my-project/topics/my-topic`. + returned: success + type: str + data: + description: + - The message payload for PubsubMessage. + - Pubsub message must contain either non-empty data, or at least one attribute. + - A base64-encoded string. + returned: success + type: str + attributes: + description: + - Attributes for PubsubMessage. + - Pubsub message must contain either non-empty data, or at least one attribute. + returned: success + type: dict + appEngineHttpTarget: + description: + - App Engine HTTP target. 
+ - If the job providers a App Engine HTTP target the cron will send a request + to the service instance . + returned: success + type: complex + contains: + httpMethod: + description: + - Which HTTP method to use for the request. + returned: success + type: str + appEngineRouting: + description: + - App Engine Routing setting for the job. + returned: success + type: complex + contains: + service: + description: + - App service. + - By default, the job is sent to the service which is the default service + when the job is attempted. + returned: success + type: str + version: + description: + - App version. + - By default, the job is sent to the version which is the default version + when the job is attempted. + returned: success + type: str + instance: + description: + - App instance. + - By default, the job is sent to an instance which is available when + the job is attempted. + returned: success + type: str + relativeUri: + description: + - The relative URI. + returned: success + type: str + body: + description: + - HTTP request body. A request body is allowed only if the HTTP method is + POST or PUT. It will result in invalid argument error to set a body on + a job with an incompatible HttpMethod. + - A base64-encoded string. + returned: success + type: str + headers: + description: + - HTTP request headers. + - This map contains the header field names and values. Headers can be set + when the job is created. + returned: success + type: dict + httpTarget: + description: + - HTTP target. + - If the job providers a http_target the cron will send a request to the targeted + url . + returned: success + type: complex + contains: + uri: + description: + - The full URI path that the request will be sent to. + returned: success + type: str + httpMethod: + description: + - Which HTTP method to use for the request. + returned: success + type: str + body: + description: + - HTTP request body. A request body is allowed only if the HTTP method is + POST, PUT, or PATCH. 
It is an error to set body on a job with an incompatible + HttpMethod. + - A base64-encoded string. + returned: success + type: str + headers: + description: + - This map contains the header field names and values. Repeated headers + are not supported, but a header value can contain commas. + returned: success + type: dict + oauthToken: + description: + - Contains information needed for generating an OAuth token. + - This type of authorization should be used when sending requests to a GCP + endpoint. + returned: success + type: complex + contains: + serviceAccountEmail: + description: + - Service account email to be used for generating OAuth token. + - The service account must be within the same project as the job. + returned: success + type: str + scope: + description: + - OAuth scope to be used for generating OAuth access token. If not specified, + "U(https://www.googleapis.com/auth/cloud-platform") will be used. + returned: success + type: str + oidcToken: + description: + - Contains information needed for generating an OpenID Connect token. + - This type of authorization should be used when sending requests to third + party endpoints or Cloud Run. + returned: success + type: complex + contains: + serviceAccountEmail: + description: + - Service account email to be used for generating OAuth token. + - The service account must be within the same project as the job. + returned: success + type: str + audience: + description: + - Audience to be used when generating OIDC token. If not specified, + the URI specified in target will be used. + returned: success + type: str + region: + description: + - Region where the scheduler job resides . 
################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json

################################################################################
# Main
################################################################################


def main():
    """Module entry point: list Cloud Scheduler jobs in a project/region.

    Read-only info module — only 'region' (plus the shared auth options) is
    accepted, and the result is returned under the 'resources' key.
    """
    module = GcpModule(argument_spec=dict(region=dict(required=True, type='str')))

    # Default to the broad cloud-platform scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    return_value = {'resources': fetch_list(module, collection(module))}
    module.exit_json(**return_value)


def collection(module):
    """URL of the jobs collection for the configured project/region."""
    return "https://cloudscheduler.googleapis.com/v1/projects/{project}/locations/{region}/jobs".format(**module.params)


def fetch_list(module, link):
    """Page through the collection, accumulating the 'jobs' array entries."""
    auth = GcpSession(module, 'cloudscheduler')
    return auth.list(link, return_if_object, array_name='jobs')


def return_if_object(module, response):
    """Decode one list-page response to a dict, or fail the module on error.

    Returns None for 404 and for 204 No Content.
    """
    # If not found, return nothing.
    if response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    # json.decoder.JSONDecodeError is absent on very old Pythons; fall back
    # to ValueError (its base class) so both raise paths are caught.
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_cloudtasks_queue.py b/ansible_collections/google/cloud/plugins/modules/gcp_cloudtasks_queue.py new file mode 100644 index 000000000..b2858599b --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_cloudtasks_queue.py @@ -0,0 +1,700 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_cloudtasks_queue +description: +- A named resource to which messages are sent by publishers. 
+short_description: Creates a GCP Queue +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The queue name. + required: false + type: str + app_engine_routing_override: + description: + - Overrides for task-level appEngineRouting. These settings apply only to App + Engine tasks in this queue . + required: false + type: dict + suboptions: + service: + description: + - App service. + - By default, the task is sent to the service which is the default service + when the task is attempted. + required: false + type: str + version: + description: + - App version. + - By default, the task is sent to the version which is the default version + when the task is attempted. + required: false + type: str + instance: + description: + - App instance. + - By default, the task is sent to an instance which is available when the + task is attempted. + required: false + type: str + rate_limits: + description: + - Rate limits for task dispatches. + - 'The queue''s actual dispatch rate is the result of: * Number of tasks in the + queue * User-specified throttling: rateLimits, retryConfig, and the queue''s + state.' + - "* System throttling due to 429 (Too Many Requests) or 503 (Service Unavailable) + responses from the worker, high error rates, or to smooth sudden large traffic + spikes." + required: false + type: dict + suboptions: + max_dispatches_per_second: + description: + - The maximum rate at which tasks are dispatched from this queue. + - If unspecified when the queue is created, Cloud Tasks will pick the default. + required: false + type: str + max_concurrent_dispatches: + description: + - The maximum number of concurrent tasks that Cloud Tasks allows to be dispatched + for this queue. 
After this threshold has been reached, Cloud Tasks stops + dispatching tasks until the number of concurrent requests decreases. + required: false + type: int + retry_config: + description: + - Settings that determine the retry behavior. + required: false + type: dict + suboptions: + max_attempts: + description: + - Number of attempts per task. + - Cloud Tasks will attempt the task maxAttempts times (that is, if the first + attempt fails, then there will be maxAttempts - 1 retries). Must be >= -1. + - If unspecified when the queue is created, Cloud Tasks will pick the default. + - "-1 indicates unlimited attempts." + required: false + type: int + max_retry_duration: + description: + - If positive, maxRetryDuration specifies the time limit for retrying a failed + task, measured from when the task was first attempted. Once maxRetryDuration + time has passed and the task has been attempted maxAttempts times, no further + attempts will be made and the task will be deleted. + - If zero, then the task age is unlimited. + required: false + type: str + min_backoff: + description: + - A task will be scheduled for retry between minBackoff and maxBackoff duration + after it fails, if the queue's RetryConfig specifies that the task should + be retried. + required: false + type: str + max_backoff: + description: + - A task will be scheduled for retry between minBackoff and maxBackoff duration + after it fails, if the queue's RetryConfig specifies that the task should + be retried. + required: false + type: str + max_doublings: + description: + - The time between retries will double maxDoublings times. + - A task's retry interval starts at minBackoff, then doubles maxDoublings + times, then increases linearly, and finally retries retries at intervals + of maxBackoff up to maxAttempts times. + required: false + type: int + stackdriver_logging_config: + description: + - Configuration options for writing logs to Stackdriver Logging. 
+ required: false + type: dict + suboptions: + sampling_ratio: + description: + - Specifies the fraction of operations to write to Stackdriver Logging. + - This field may contain any value between 0.0 and 1.0, inclusive. 0.0 is + the default and means that no operations are logged. + required: true + type: str + status: + description: + - The current state of the queue. + - 'Some valid choices include: "RUNNING", "PAUSED", "DISABLED"' + required: false + type: str + location: + description: + - The location of the queue. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a queue + google.cloud.gcp_cloudtasks_queue: + name: test_object + location: us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The queue name. 
+ returned: success + type: str +appEngineRoutingOverride: + description: + - Overrides for task-level appEngineRouting. These settings apply only to App Engine + tasks in this queue . + returned: success + type: complex + contains: + service: + description: + - App service. + - By default, the task is sent to the service which is the default service when + the task is attempted. + returned: success + type: str + version: + description: + - App version. + - By default, the task is sent to the version which is the default version when + the task is attempted. + returned: success + type: str + instance: + description: + - App instance. + - By default, the task is sent to an instance which is available when the task + is attempted. + returned: success + type: str + host: + description: + - The host that the task is sent to. + returned: success + type: str +rateLimits: + description: + - Rate limits for task dispatches. + - 'The queue''s actual dispatch rate is the result of: * Number of tasks in the + queue * User-specified throttling: rateLimits, retryConfig, and the queue''s state.' + - "* System throttling due to 429 (Too Many Requests) or 503 (Service Unavailable) + responses from the worker, high error rates, or to smooth sudden large traffic + spikes." + returned: success + type: complex + contains: + maxDispatchesPerSecond: + description: + - The maximum rate at which tasks are dispatched from this queue. + - If unspecified when the queue is created, Cloud Tasks will pick the default. + returned: success + type: str + maxConcurrentDispatches: + description: + - The maximum number of concurrent tasks that Cloud Tasks allows to be dispatched + for this queue. After this threshold has been reached, Cloud Tasks stops dispatching + tasks until the number of concurrent requests decreases. + returned: success + type: int + maxBurstSize: + description: + - The max burst size. 
+      - Max burst size limits how fast tasks in queue are processed when many tasks
+        are in the queue and the rate is high. This field allows the queue to have
+        a high rate so processing starts shortly after a task is enqueued, but still
+        limits resource usage when many tasks are enqueued in a short period of time.
+      returned: success
+      type: int
+retryConfig:
+  description:
+  - Settings that determine the retry behavior.
+  returned: success
+  type: complex
+  contains:
+    maxAttempts:
+      description:
+      - Number of attempts per task.
+      - Cloud Tasks will attempt the task maxAttempts times (that is, if the first
+        attempt fails, then there will be maxAttempts - 1 retries). Must be >= -1.
+      - If unspecified when the queue is created, Cloud Tasks will pick the default.
+      - "-1 indicates unlimited attempts."
+      returned: success
+      type: int
+    maxRetryDuration:
+      description:
+      - If positive, maxRetryDuration specifies the time limit for retrying a failed
+        task, measured from when the task was first attempted. Once maxRetryDuration
+        time has passed and the task has been attempted maxAttempts times, no further
+        attempts will be made and the task will be deleted.
+      - If zero, then the task age is unlimited.
+      returned: success
+      type: str
+    minBackoff:
+      description:
+      - A task will be scheduled for retry between minBackoff and maxBackoff duration
+        after it fails, if the queue's RetryConfig specifies that the task should
+        be retried.
+      returned: success
+      type: str
+    maxBackoff:
+      description:
+      - A task will be scheduled for retry between minBackoff and maxBackoff duration
+        after it fails, if the queue's RetryConfig specifies that the task should
+        be retried.
+      returned: success
+      type: str
+    maxDoublings:
+      description:
+      - The time between retries will double maxDoublings times.
+      - A task's retry interval starts at minBackoff, then doubles maxDoublings times,
+        then increases linearly, and finally retries at intervals of maxBackoff
+        up to maxAttempts times.
+ returned: success + type: int + purgeTime: + description: + - The last time this queue was purged. + returned: success + type: str +stackdriverLoggingConfig: + description: + - Configuration options for writing logs to Stackdriver Logging. + returned: success + type: complex + contains: + samplingRatio: + description: + - Specifies the fraction of operations to write to Stackdriver Logging. + - This field may contain any value between 0.0 and 1.0, inclusive. 0.0 is the + default and means that no operations are logged. + returned: success + type: str +status: + description: + - The current state of the queue. + returned: success + type: str +location: + description: + - The location of the queue. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(type='str'), + app_engine_routing_override=dict(type='dict', options=dict(service=dict(type='str'), version=dict(type='str'), instance=dict(type='str'))), + rate_limits=dict(type='dict', options=dict(max_dispatches_per_second=dict(type='str'), max_concurrent_dispatches=dict(type='int'))), + retry_config=dict( + type='dict', + options=dict( + max_attempts=dict(type='int'), + max_retry_duration=dict(type='str'), + min_backoff=dict(type='str'), + max_backoff=dict(type='str'), + max_doublings=dict(type='int'), + 
), + ), + stackdriver_logging_config=dict(type='dict', options=dict(sampling_ratio=dict(required=True, type='str'))), + status=dict(type='str'), + location=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + if fetch: + instance = QueueStatus(module, fetch.get('state')) + instance.run() + if module.params.get('status'): + fetch.update({'status': module.params['status']}) + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'cloudtasks') + return return_if_object(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, fetch): + auth = GcpSession(module, 'cloudtasks') + params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))} + request = resource_to_request(module) + del request['name'] + return return_if_object(module, auth.patch(link, request, params=params)) + + +def updateMask(request, response): + update_mask = [] + if request.get('appEngineRoutingOverride') != response.get('appEngineRoutingOverride'): + update_mask.append('appEngineRoutingOverride') + if request.get('rateLimits') != response.get('rateLimits'): + update_mask.append('rateLimits') + if request.get('retryConfig') != response.get('retryConfig'): + update_mask.append('retryConfig') + if request.get('stackdriverLoggingConfig') != response.get('stackdriverLoggingConfig'): + 
update_mask.append('stackdriverLoggingConfig') + if request.get('status') != response.get('status'): + update_mask.append('status') + return ','.join(update_mask) + + +def delete(module, link): + auth = GcpSession(module, 'cloudtasks') + return return_if_object(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'name': name_pattern(module.params.get('name'), module), + u'appEngineRoutingOverride': QueueAppengineroutingoverride(module.params.get('app_engine_routing_override', {}), module).to_request(), + u'rateLimits': QueueRatelimits(module.params.get('rate_limits', {}), module).to_request(), + u'retryConfig': QueueRetryconfig(module.params.get('retry_config', {}), module).to_request(), + u'stackdriverLoggingConfig': QueueStackdriverloggingconfig(module.params.get('stackdriver_logging_config', {}), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'cloudtasks') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://cloudtasks.googleapis.com/v2/projects/{project}/locations/{location}/queues/{name}".format(**module.params) + + +def collection(module): + return "https://cloudtasks.googleapis.com/v2/projects/{project}/locations/{location}/queues".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'name': name_pattern(module.params.get('name'), module), + u'appEngineRoutingOverride': QueueAppengineroutingoverride(response.get(u'appEngineRoutingOverride', {}), module).from_response(), + u'rateLimits': QueueRatelimits(response.get(u'rateLimits', {}), module).from_response(), + u'retryConfig': QueueRetryconfig(response.get(u'retryConfig', {}), module).from_response(), + u'stackdriverLoggingConfig': QueueStackdriverloggingconfig(response.get(u'stackdriverLoggingConfig', {}), module).from_response(), + } + + +def name_pattern(name, module): + if name is None: + return + + regex = r"projects/.*/locations/.*/queues/.*" + + if not re.match(regex, name): + name = "projects/{project}/locations/{location}/queues/{name}".format(**module.params) + + return name + + +class QueueStatus(object): + def __init__(self, module, current_status): + self.module = module + self.current_status = current_status + self.desired_status = self.module.params.get('status') + + def run(self): + # 
GcpRequest handles unicode text handling + if GcpRequest({'status': self.current_status}) == GcpRequest({'status': self.desired_status}): + return + elif self.desired_status == 'PAUSED': + self.stop() + elif self.desired_status == 'RUNNING': + self.start() + + def start(self): + auth = GcpSession(self.module, 'cloudtasks') + return_if_object(self.module, auth.post(self._start_url())) + + def stop(self): + auth = GcpSession(self.module, 'cloudtasks') + return_if_object(self.module, auth.post(self._stop_url())) + + def _start_url(self): + return "https://cloudtasks.googleapis.com/v2/projects/{project}/locations/{location}/queues/{name}:resume".format(**self.module.params) + + def _stop_url(self): + return "https://cloudtasks.googleapis.com/v2/projects/{project}/locations/{location}/queues/{name}:pause".format(**self.module.params) + + +class QueueAppengineroutingoverride(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'service': self.request.get('service'), u'version': self.request.get('version'), u'instance': self.request.get('instance')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'service': self.request.get(u'service'), u'version': self.request.get(u'version'), u'instance': self.request.get(u'instance')} + ) + + +class QueueRatelimits(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'maxDispatchesPerSecond': self.request.get('max_dispatches_per_second'), + u'maxConcurrentDispatches': self.request.get('max_concurrent_dispatches'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + {u'maxDispatchesPerSecond': self.request.get(u'maxDispatchesPerSecond'), u'maxConcurrentDispatches': 
self.request.get(u'maxConcurrentDispatches')} + ) + + +class QueueRetryconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'maxAttempts': self.request.get('max_attempts'), + u'maxRetryDuration': self.request.get('max_retry_duration'), + u'minBackoff': self.request.get('min_backoff'), + u'maxBackoff': self.request.get('max_backoff'), + u'maxDoublings': self.request.get('max_doublings'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'maxAttempts': self.request.get(u'maxAttempts'), + u'maxRetryDuration': self.request.get(u'maxRetryDuration'), + u'minBackoff': self.request.get(u'minBackoff'), + u'maxBackoff': self.request.get(u'maxBackoff'), + u'maxDoublings': self.request.get(u'maxDoublings'), + } + ) + + +class QueueStackdriverloggingconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'samplingRatio': self.request.get('sampling_ratio')}) + + def from_response(self): + return remove_nones_from_dict({u'samplingRatio': self.request.get(u'samplingRatio')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_cloudtasks_queue_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_cloudtasks_queue_info.py new file mode 100644 index 000000000..95f306b40 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_cloudtasks_queue_info.py @@ -0,0 +1,315 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# 
---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_cloudtasks_queue_info +description: +- Gather info for GCP Queue +short_description: Gather info for GCP Queue +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + location: + description: + - The location of the queue. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used
+    type: list
+    elements: str
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variables values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on a queue
+  google.cloud.gcp_cloudtasks_queue_info:
+    location: us-central1
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    name:
+      description:
+      - The queue name.
+      returned: success
+      type: str
+    appEngineRoutingOverride:
+      description:
+      - Overrides for task-level appEngineRouting. These settings apply only to App
+        Engine tasks in this queue.
+      returned: success
+      type: complex
+      contains:
+        service:
+          description:
+          - App service.
+          - By default, the task is sent to the service which is the default service
+            when the task is attempted.
+          returned: success
+          type: str
+        version:
+          description:
+          - App version.
+          - By default, the task is sent to the version which is the default version
+            when the task is attempted.
+ returned: success + type: str + instance: + description: + - App instance. + - By default, the task is sent to an instance which is available when the + task is attempted. + returned: success + type: str + host: + description: + - The host that the task is sent to. + returned: success + type: str + rateLimits: + description: + - Rate limits for task dispatches. + - 'The queue''s actual dispatch rate is the result of: * Number of tasks in + the queue * User-specified throttling: rateLimits, retryConfig, and the queue''s + state.' + - "* System throttling due to 429 (Too Many Requests) or 503 (Service Unavailable) + responses from the worker, high error rates, or to smooth sudden large traffic + spikes." + returned: success + type: complex + contains: + maxDispatchesPerSecond: + description: + - The maximum rate at which tasks are dispatched from this queue. + - If unspecified when the queue is created, Cloud Tasks will pick the default. + returned: success + type: str + maxConcurrentDispatches: + description: + - The maximum number of concurrent tasks that Cloud Tasks allows to be dispatched + for this queue. After this threshold has been reached, Cloud Tasks stops + dispatching tasks until the number of concurrent requests decreases. + returned: success + type: int + maxBurstSize: + description: + - The max burst size. + - Max burst size limits how fast tasks in queue are processed when many + tasks are in the queue and the rate is high. This field allows the queue + to have a high rate so processing starts shortly after a task is enqueued, + but still limits resource usage when many tasks are enqueued in a short + period of time. + returned: success + type: int + retryConfig: + description: + - Settings that determine the retry behavior. + returned: success + type: complex + contains: + maxAttempts: + description: + - Number of attempts per task. 
+          - Cloud Tasks will attempt the task maxAttempts times (that is, if the first
+            attempt fails, then there will be maxAttempts - 1 retries). Must be >=
+            -1.
+          - If unspecified when the queue is created, Cloud Tasks will pick the default.
+          - "-1 indicates unlimited attempts."
+          returned: success
+          type: int
+        maxRetryDuration:
+          description:
+          - If positive, maxRetryDuration specifies the time limit for retrying a
+            failed task, measured from when the task was first attempted. Once maxRetryDuration
+            time has passed and the task has been attempted maxAttempts times, no
+            further attempts will be made and the task will be deleted.
+          - If zero, then the task age is unlimited.
+          returned: success
+          type: str
+        minBackoff:
+          description:
+          - A task will be scheduled for retry between minBackoff and maxBackoff duration
+            after it fails, if the queue's RetryConfig specifies that the task should
+            be retried.
+          returned: success
+          type: str
+        maxBackoff:
+          description:
+          - A task will be scheduled for retry between minBackoff and maxBackoff duration
+            after it fails, if the queue's RetryConfig specifies that the task should
+            be retried.
+          returned: success
+          type: str
+        maxDoublings:
+          description:
+          - The time between retries will double maxDoublings times.
+          - A task's retry interval starts at minBackoff, then doubles maxDoublings
+            times, then increases linearly, and finally retries at intervals
+            of maxBackoff up to maxAttempts times.
+          returned: success
+          type: int
+        purgeTime:
+          description:
+          - The last time this queue was purged.
+          returned: success
+          type: str
+    stackdriverLoggingConfig:
+      description:
+      - Configuration options for writing logs to Stackdriver Logging.
+      returned: success
+      type: complex
+      contains:
+        samplingRatio:
+          description:
+          - Specifies the fraction of operations to write to Stackdriver Logging.
+          - This field may contain any value between 0.0 and 1.0, inclusive. 0.0 is
+            the default and means that no operations are logged.
+ returned: success + type: str + status: + description: + - The current state of the queue. + returned: success + type: str + location: + description: + - The location of the queue. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(location=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://cloudtasks.googleapis.com/v2/projects/{project}/locations/{location}/queues".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'cloudtasks') + return auth.list(link, return_if_object, array_name='queues') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_address.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_address.py new file mode 100644 index 000000000..122db491f --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_address.py @@ -0,0 +1,512 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_address +description: +- Represents an Address resource. 
+- Each virtual machine instance has an ephemeral internal IP address and, optionally, + an external IP address. To communicate between instances on the same network, you + can use an instance's internal IP address. To communicate with the Internet and + instances outside of the same network, you must specify the instance's external + IP address. +- Internal IP addresses are ephemeral and only belong to an instance for the lifetime + of the instance; if the instance is deleted and recreated, the instance is assigned + a new internal IP address, either by Compute Engine or by you. External IP addresses + can be either ephemeral or static. +short_description: Creates a GCP Address +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + address: + description: + - The static external IP address represented by this resource. Only IPv4 is supported. + An address may only be specified for INTERNAL address types. The IP address + must be inside the specified subnetwork, if any. + required: false + type: str + address_type: + description: + - The type of address to reserve. + - 'Some valid choices include: "INTERNAL", "EXTERNAL"' + required: false + default: EXTERNAL + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must + be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. 
+ required: true + type: str + purpose: + description: + - 'The purpose of this resource, which can be one of the following values: * GCE_ENDPOINT + for addresses that are used by VM instances, alias IP ranges, internal load + balancers, and similar resources.' + - "* SHARED_LOADBALANCER_VIP for an address that can be used by multiple internal + load balancers." + - "* VPC_PEERING for addresses that are reserved for VPC peer networks." + - "* IPSEC_INTERCONNECT for addresses created from a private IP range that are + reserved for a VLAN attachment in an IPsec-encrypted Cloud Interconnect configuration. + These addresses are regional resources." + - This should only be set when using an Internal address. + required: false + type: str + network_tier: + description: + - The networking tier used for configuring this address. If this field is not + specified, it is assumed to be PREMIUM. + - 'Some valid choices include: "PREMIUM", "STANDARD"' + required: false + type: str + subnetwork: + description: + - The URL of the subnetwork in which to reserve the address. If an IP address + is specified, it must be within the subnetwork's IP range. + - This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER + purposes. + - 'This field represents a link to a Subnetwork resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_subnetwork task and then set this subnetwork field to "{{ name-of-resource + }}"' + required: false + type: dict + network: + description: + - The URL of the network in which to reserve the address. This field can only + be used with INTERNAL type with the VPC_PEERING and IPSEC_INTERCONNECT purposes. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: false + type: dict + prefix_length: + description: + - The prefix length if the resource represents an IP range. + required: false + type: int + region: + description: + - URL of the region where the regional address resides. + - This field is not applicable to global addresses. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/beta/addresses)' +- 'Reserving a Static External IP Address: U(https://cloud.google.com/compute/docs/instances-and-network)' +- 'Reserving a Static Internal IP Address: U(https://cloud.google.com/compute/docs/ip-addresses/reserve-static-internal-ip-address)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a address + google.cloud.gcp_compute_address: + name: test-address1 + region: us-west1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +address: + description: + - The static external IP address represented by this resource. Only IPv4 is supported. + An address may only be specified for INTERNAL address types. The IP address must + be inside the specified subnetwork, if any. + returned: success + type: str +addressType: + description: + - The type of address to reserve. + returned: success + type: str +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. 
+ returned: success + type: int +name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. + Specifically, the name must be 1-63 characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase + letter, and all following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + returned: success + type: str +purpose: + description: + - 'The purpose of this resource, which can be one of the following values: * GCE_ENDPOINT + for addresses that are used by VM instances, alias IP ranges, internal load balancers, + and similar resources.' + - "* SHARED_LOADBALANCER_VIP for an address that can be used by multiple internal + load balancers." + - "* VPC_PEERING for addresses that are reserved for VPC peer networks." + - "* IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved + for a VLAN attachment in an IPsec-encrypted Cloud Interconnect configuration. + These addresses are regional resources." + - This should only be set when using an Internal address. + returned: success + type: str +networkTier: + description: + - The networking tier used for configuring this address. If this field is not specified, + it is assumed to be PREMIUM. + returned: success + type: str +subnetwork: + description: + - The URL of the subnetwork in which to reserve the address. If an IP address is + specified, it must be within the subnetwork's IP range. + - This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER + purposes. + returned: success + type: dict +users: + description: + - The URLs of the resources that are using this address. + returned: success + type: list +status: + description: + - The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. + - An address that is RESERVING is currently in the process of being reserved. 
+ - A RESERVED address is currently reserved and available to use. An IN_USE address + is currently being used by another resource and is not available. + returned: success + type: str +network: + description: + - The URL of the network in which to reserve the address. This field can only be + used with INTERNAL type with the VPC_PEERING and IPSEC_INTERCONNECT purposes. + returned: success + type: dict +prefixLength: + description: + - The prefix length if the resource represents an IP range. + returned: success + type: int +region: + description: + - URL of the region where the regional address resides. + - This field is not applicable to global addresses. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + address=dict(type='str'), + address_type=dict(default='EXTERNAL', type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + purpose=dict(type='str'), + network_tier=dict(type='str'), + subnetwork=dict(type='dict'), + network=dict(type='dict'), + prefix_length=dict(type='int'), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#address' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False 
+ + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), kind) + create(module, collection(module), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#address', + u'address': module.params.get('address'), + u'addressType': module.params.get('address_type'), + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'purpose': module.params.get('purpose'), + u'networkTier': module.params.get('network_tier'), + u'subnetwork': replace_resource_dict(module.params.get(u'subnetwork', {}), 'selfLink'), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + u'prefixLength': module.params.get('prefix_length'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/addresses/{name}".format(**module.params) + + +def collection(module): + return 
"https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/addresses".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'address': response.get(u'address'), + u'addressType': response.get(u'addressType'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'id': response.get(u'id'), + u'name': response.get(u'name'), + u'purpose': response.get(u'purpose'), + u'networkTier': response.get(u'networkTier'), + u'subnetwork': response.get(u'subnetwork'), + u'users': response.get(u'users'), + u'status': response.get(u'status'), + u'network': response.get(u'network'), + u'prefixLength': response.get(u'prefixLength'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#address') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_address_info.py 
b/ansible_collections/google/cloud/plugins/modules/gcp_compute_address_info.py new file mode 100644 index 000000000..bbd8c2c80 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_address_info.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_address_info +description: +- Gather info for GCP Address +short_description: Gather info for GCP Address +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - URL of the region where the regional address resides. 
+ - This field is not applicable to global addresses. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on an address + gcp_compute_address_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + address: + description: + - The static external IP address represented by this resource. Only IPv4 is + supported. An address may only be specified for INTERNAL address types. The + IP address must be inside the specified subnetwork, if any. + returned: success + type: str + addressType: + description: + - The type of address to reserve. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + purpose: + description: + - 'The purpose of this resource, which can be one of the following values: * + GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, + internal load balancers, and similar resources.' + - "* SHARED_LOADBALANCER_VIP for an address that can be used by multiple internal + load balancers." + - "* VPC_PEERING for addresses that are reserved for VPC peer networks." 
+ - "* IPSEC_INTERCONNECT for addresses created from a private IP range that are + reserved for a VLAN attachment in an IPsec-encrypted Cloud Interconnect configuration. + These addresses are regional resources." + - This should only be set when using an Internal address. + returned: success + type: str + networkTier: + description: + - The networking tier used for configuring this address. If this field is not + specified, it is assumed to be PREMIUM. + returned: success + type: str + subnetwork: + description: + - The URL of the subnetwork in which to reserve the address. If an IP address + is specified, it must be within the subnetwork's IP range. + - This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER + purposes. + returned: success + type: dict + users: + description: + - The URLs of the resources that are using this address. + returned: success + type: list + status: + description: + - The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. + - An address that is RESERVING is currently in the process of being reserved. + - A RESERVED address is currently reserved and available to use. An IN_USE address + is currently being used by another resource and is not available. + returned: success + type: str + network: + description: + - The URL of the network in which to reserve the address. This field can only + be used with INTERNAL type with the VPC_PEERING and IPSEC_INTERCONNECT purposes. + returned: success + type: dict + prefixLength: + description: + - The prefix length if the resource represents an IP range. + returned: success + type: int + region: + description: + - URL of the region where the regional address resides. + - This field is not applicable to global addresses. 
import json


def main():
    """List GCP compute addresses in a region, optionally narrowed by filters."""
    module = GcpModule(
        argument_spec=dict(
            filters=dict(type='list', elements='str'),
            region=dict(required=True, type='str'),
        )
    )

    # Fall back to the compute scope when the playbook supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    query = query_options(module.params['filters'])
    module.exit_json(resources=fetch_list(module, collection(module), query))


def collection(module):
    """Return the regional addresses collection URL for the configured project."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/addresses".format(
        project=module.params['project'], region=module.params['region']
    )


def fetch_list(module, link, query):
    """Retrieve the 'items' of the collection at ``link``, filtered by ``query``."""
    session = GcpSession(module, 'compute')
    return session.list(link, return_if_object, array_name='items', params={'filter': query})


def query_options(filters):
    """Collapse a list of filter expressions into one API query string.

    Multiple expressions are AND-ed by joining them with spaces, each wrapped
    in parentheses unless it already starts with '(' or ends with ')'.
    """
    if not filters:
        return ''

    if len(filters) == 1:
        return filters[0]

    clauses = [f if f[0] == '(' or f[-1] == ')' else '(%s)' % f for f in filters]
    return ' '.join(clauses)


def return_if_object(module, response):
    """Decode an HTTP response into a dict, failing the module on API errors.

    Returns None for 404 (not found) and 204 (no content) responses.
    """
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    # A 200 response can still carry an embedded error payload.
    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return result


if __name__ == "__main__":
    main()
+- Autoscalers allow you to automatically scale virtual machine instances in managed + instance groups according to an autoscaling policy that you define. +short_description: Creates a GCP Autoscaler +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Name of the resource. The name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must + be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + required: true + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + autoscaling_policy: + description: + - 'The configuration parameters for the autoscaling algorithm. You can define + one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, + and loadBalancingUtilization.' + - If none of these are specified, the default will be to autoscale based on cpuUtilization + to 0.6 or 60%. + required: true + type: dict + suboptions: + min_num_replicas: + description: + - The minimum number of replicas that the autoscaler can scale down to. This + cannot be less than 0. If not provided, autoscaler will choose a default + value depending on maximum number of instances allowed. + required: false + type: int + aliases: + - minReplicas + max_num_replicas: + description: + - The maximum number of instances that the autoscaler can scale up to. This + is required when creating or updating an autoscaler. The maximum number + of replicas should not be lower than minimal number of replicas. 
+ required: true + type: int + aliases: + - maxReplicas + cool_down_period_sec: + description: + - The number of seconds that the autoscaler should wait before it starts collecting + information from a new instance. This prevents the autoscaler from collecting + information when the instance is initializing, during which the collected + usage would not be reliable. The default time autoscaler waits is 60 seconds. + - Virtual machine initialization times might vary because of numerous factors. + We recommend that you test how long an instance may take to initialize. + To do this, create an instance and time the startup process. + required: false + default: '60' + type: int + aliases: + - cooldownPeriod + mode: + description: + - Defines operating mode for this policy. + - 'Some valid choices include: "OFF", "ONLY_UP", "ON"' + required: false + default: 'ON' + type: str + scale_in_control: + description: + - Defines scale in controls to reduce the risk of response latency and outages + due to abrupt scale-in events . + required: false + type: dict + suboptions: + max_scaled_in_replicas: + description: + - A nested object resource. + required: false + type: dict + suboptions: + fixed: + description: + - Specifies a fixed number of VM instances. This must be a positive + integer. + required: false + type: int + percent: + description: + - Specifies a percentage of instances between 0 to 100%, inclusive. + - For example, specify 80 for 80%. + required: false + type: int + time_window_sec: + description: + - How long back autoscaling should look when computing recommendations + to include directives regarding slower scale down, as described above. + required: false + type: int + cpu_utilization: + description: + - Defines the CPU utilization policy that allows the autoscaler to scale based + on the average CPU utilization of a managed instance group. 
+ required: false + type: dict + suboptions: + utilization_target: + description: + - The target CPU utilization that the autoscaler should maintain. + - Must be a float value in the range (0, 1]. If not specified, the default + is 0.6. + - If the CPU level is below the target utilization, the autoscaler scales + down the number of instances until it reaches the minimum number of + instances you specified or until the average CPU of your instances reaches + the target utilization. + - If the average CPU is above the target utilization, the autoscaler scales + up until it reaches the maximum number of instances you specified or + until the average utilization reaches the target utilization. + required: false + type: str + aliases: + - target + predictive_method: + description: + - 'Indicates whether predictive autoscaling based on CPU metric is enabled. + Valid values are: - NONE (default). No predictive method is used. The + autoscaler scales the group to meet current demand based on real-time + metrics.' + - "- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability + by monitoring daily and weekly load patterns and scaling out ahead of + anticipated demand." + required: false + default: NONE + type: str + custom_metric_utilizations: + description: + - Configuration parameters of autoscaling based on a custom metric. + elements: dict + required: false + type: list + aliases: + - metric + suboptions: + metric: + description: + - The identifier (type) of the Stackdriver Monitoring metric. + - The metric cannot have negative values. + - The metric must have a value type of INT64 or DOUBLE. + required: true + type: str + aliases: + - name + utilization_target: + description: + - The target value of the metric that autoscaler should maintain. This + must be a positive value. A utilization metric scales number of virtual + machines handling requests to increase or decrease proportionally to + the metric. 
+ - For example, a good metric to use as a utilizationTarget is U(www.googleapis.com/compute/instance/network/received_bytes_count). + - The autoscaler will work to keep this value constant for each of the + instances. + required: false + type: str + aliases: + - target + utilization_target_type: + description: + - Defines how target utilization value is expressed for a Stackdriver + Monitoring metric. + - 'Some valid choices include: "GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE"' + required: false + type: str + aliases: + - type + load_balancing_utilization: + description: + - Configuration parameters of autoscaling based on a load balancer. + required: false + type: dict + suboptions: + utilization_target: + description: + - Fraction of backend capacity utilization (set in HTTP(s) load balancing + configuration) that autoscaler should maintain. Must be a positive float + value. If not defined, the default is 0.8. + required: false + type: str + aliases: + - target + target: + description: + - URL of the managed instance group that this autoscaler will scale. + - 'This field represents a link to a InstanceGroupManager resource in GCP. It + can be specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_instance_group_manager task and then set + this target field to "{{ name-of-resource }}"' + required: true + type: dict + zone: + description: + - URL of the zone where the instance group resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/autoscalers)' +- 'Autoscaling Groups of Instances: U(https://cloud.google.com/compute/docs/autoscaler/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-instancetemplate + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a address + google.cloud.gcp_compute_address: + name: address-instancetemplate + region: us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: address + +- name: create a instance template + google.cloud.gcp_compute_instance_template: + name: "{{ resource_name }}" + properties: + disks: + - auto_delete: 'true' + boot: 'true' + initialize_params: + source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts + machine_type: n1-standard-1 + network_interfaces: + - network: "{{ network }}" + access_configs: + - name: test-config + type: ONE_TO_ONE_NAT + nat_ip: "{{ address }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancetemplate + +- name: create a instance group manager + google.cloud.gcp_compute_instance_group_manager: + name: "{{ resource_name }}" + base_instance_name: test1-child + instance_template: "{{ instancetemplate }}" + target_size: 3 + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: igm + +- name: create a autoscaler + google.cloud.gcp_compute_autoscaler: + name: test_object + zone: us-central1-a + target: "{{ igm }}" + autoscaling_policy: + max_num_replicas: 5 + min_num_replicas: 1 + cool_down_period_sec: 60 + cpu_utilization: + utilization_target: 0.5 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +id: + description: + - Unique identifier for the resource. 
+ returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +name: + description: + - Name of the resource. The name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + a lowercase letter, and all following characters must be a dash, lowercase letter, + or digit, except the last character, which cannot be a dash. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +autoscalingPolicy: + description: + - 'The configuration parameters for the autoscaling algorithm. You can define one + or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, + and loadBalancingUtilization.' + - If none of these are specified, the default will be to autoscale based on cpuUtilization + to 0.6 or 60%. + returned: success + type: complex + contains: + minNumReplicas: + description: + - The minimum number of replicas that the autoscaler can scale down to. This + cannot be less than 0. If not provided, autoscaler will choose a default value + depending on maximum number of instances allowed. + returned: success + type: int + maxNumReplicas: + description: + - The maximum number of instances that the autoscaler can scale up to. This + is required when creating or updating an autoscaler. The maximum number of + replicas should not be lower than minimal number of replicas. + returned: success + type: int + coolDownPeriodSec: + description: + - The number of seconds that the autoscaler should wait before it starts collecting + information from a new instance. This prevents the autoscaler from collecting + information when the instance is initializing, during which the collected + usage would not be reliable. The default time autoscaler waits is 60 seconds. 
+ - Virtual machine initialization times might vary because of numerous factors. + We recommend that you test how long an instance may take to initialize. To + do this, create an instance and time the startup process. + returned: success + type: int + mode: + description: + - Defines operating mode for this policy. + returned: success + type: str + scaleInControl: + description: + - Defines scale in controls to reduce the risk of response latency and outages + due to abrupt scale-in events . + returned: success + type: complex + contains: + maxScaledInReplicas: + description: + - A nested object resource. + returned: success + type: complex + contains: + fixed: + description: + - Specifies a fixed number of VM instances. This must be a positive + integer. + returned: success + type: int + percent: + description: + - Specifies a percentage of instances between 0 to 100%, inclusive. + - For example, specify 80 for 80%. + returned: success + type: int + timeWindowSec: + description: + - How long back autoscaling should look when computing recommendations to + include directives regarding slower scale down, as described above. + returned: success + type: int + cpuUtilization: + description: + - Defines the CPU utilization policy that allows the autoscaler to scale based + on the average CPU utilization of a managed instance group. + returned: success + type: complex + contains: + utilizationTarget: + description: + - The target CPU utilization that the autoscaler should maintain. + - Must be a float value in the range (0, 1]. If not specified, the default + is 0.6. + - If the CPU level is below the target utilization, the autoscaler scales + down the number of instances until it reaches the minimum number of instances + you specified or until the average CPU of your instances reaches the target + utilization. 
+ - If the average CPU is above the target utilization, the autoscaler scales + up until it reaches the maximum number of instances you specified or until + the average utilization reaches the target utilization. + returned: success + type: str + predictiveMethod: + description: + - 'Indicates whether predictive autoscaling based on CPU metric is enabled. + Valid values are: - NONE (default). No predictive method is used. The + autoscaler scales the group to meet current demand based on real-time + metrics.' + - "- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability + by monitoring daily and weekly load patterns and scaling out ahead of + anticipated demand." + returned: success + type: str + customMetricUtilizations: + description: + - Configuration parameters of autoscaling based on a custom metric. + returned: success + type: complex + contains: + metric: + description: + - The identifier (type) of the Stackdriver Monitoring metric. + - The metric cannot have negative values. + - The metric must have a value type of INT64 or DOUBLE. + returned: success + type: str + utilizationTarget: + description: + - The target value of the metric that autoscaler should maintain. This must + be a positive value. A utilization metric scales number of virtual machines + handling requests to increase or decrease proportionally to the metric. + - For example, a good metric to use as a utilizationTarget is U(www.googleapis.com/compute/instance/network/received_bytes_count). + - The autoscaler will work to keep this value constant for each of the instances. + returned: success + type: str + utilizationTargetType: + description: + - Defines how target utilization value is expressed for a Stackdriver Monitoring + metric. + returned: success + type: str + loadBalancingUtilization: + description: + - Configuration parameters of autoscaling based on a load balancer. 
+ returned: success + type: complex + contains: + utilizationTarget: + description: + - Fraction of backend capacity utilization (set in HTTP(s) load balancing + configuration) that autoscaler should maintain. Must be a positive float + value. If not defined, the default is 0.8. + returned: success + type: str +target: + description: + - URL of the managed instance group that this autoscaler will scale. + returned: success + type: dict +zone: + description: + - URL of the zone where the instance group resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + autoscaling_policy=dict( + required=True, + type='dict', + options=dict( + min_num_replicas=dict(type='int', aliases=['minReplicas']), + max_num_replicas=dict(required=True, type='int', aliases=['maxReplicas']), + cool_down_period_sec=dict(default=60, type='int', aliases=['cooldownPeriod']), + mode=dict(default='ON', type='str'), + scale_in_control=dict( + type='dict', + options=dict( + max_scaled_in_replicas=dict(type='dict', options=dict(fixed=dict(type='int'), percent=dict(type='int'))), + time_window_sec=dict(type='int'), + ), + ), + cpu_utilization=dict( + type='dict', options=dict(utilization_target=dict(type='str', 
aliases=['target']), predictive_method=dict(default='NONE', type='str')) + ), + custom_metric_utilizations=dict( + type='list', + elements='dict', + aliases=['metric'], + options=dict( + metric=dict(required=True, type='str', aliases=['name']), + utilization_target=dict(type='str', aliases=['target']), + utilization_target_type=dict(type='str', aliases=['type']), + ), + ), + load_balancing_utilization=dict(type='dict', options=dict(utilization_target=dict(type='str', aliases=['target']))), + ), + ), + target=dict(required=True, type='dict'), + zone=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#autoscaler' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#autoscaler', + u'zone': module.params.get('zone'), + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'autoscalingPolicy': 
AutoscalerAutoscalingpolicy(module.params.get('autoscaling_policy', {}), module).to_request(), + u'target': replace_resource_dict(module.params.get(u'target', {}), 'selfLink'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/autoscalers/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/autoscalers".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'id': response.get(u'id'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'name': module.params.get('name'), + u'description': response.get(u'description'), + u'autoscalingPolicy': AutoscalerAutoscalingpolicy(response.get(u'autoscalingPolicy', {}), module).from_response(), + u'target': response.get(u'target'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#autoscaler') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class AutoscalerAutoscalingpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'minNumReplicas': self.request.get('min_num_replicas'), + u'maxNumReplicas': self.request.get('max_num_replicas'), + u'coolDownPeriodSec': 
self.request.get('cool_down_period_sec'), + u'mode': self.request.get('mode'), + u'scaleInControl': AutoscalerScaleincontrol(self.request.get('scale_in_control', {}), self.module).to_request(), + u'cpuUtilization': AutoscalerCpuutilization(self.request.get('cpu_utilization', {}), self.module).to_request(), + u'customMetricUtilizations': AutoscalerCustommetricutilizationsArray( + self.request.get('custom_metric_utilizations', []), self.module + ).to_request(), + u'loadBalancingUtilization': AutoscalerLoadbalancingutilization(self.request.get('load_balancing_utilization', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'minNumReplicas': self.request.get(u'minNumReplicas'), + u'maxNumReplicas': self.request.get(u'maxNumReplicas'), + u'coolDownPeriodSec': self.request.get(u'coolDownPeriodSec'), + u'mode': self.request.get(u'mode'), + u'scaleInControl': AutoscalerScaleincontrol(self.request.get(u'scaleInControl', {}), self.module).from_response(), + u'cpuUtilization': AutoscalerCpuutilization(self.request.get(u'cpuUtilization', {}), self.module).from_response(), + u'customMetricUtilizations': AutoscalerCustommetricutilizationsArray( + self.request.get(u'customMetricUtilizations', []), self.module + ).from_response(), + u'loadBalancingUtilization': AutoscalerLoadbalancingutilization(self.request.get(u'loadBalancingUtilization', {}), self.module).from_response(), + } + ) + + +class AutoscalerScaleincontrol(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'maxScaledInReplicas': AutoscalerMaxscaledinreplicas(self.request.get('max_scaled_in_replicas', {}), self.module).to_request(), + u'timeWindowSec': self.request.get('time_window_sec'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'maxScaledInReplicas': 
AutoscalerMaxscaledinreplicas(self.request.get(u'maxScaledInReplicas', {}), self.module).from_response(), + u'timeWindowSec': self.request.get(u'timeWindowSec'), + } + ) + + +class AutoscalerMaxscaledinreplicas(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'fixed': self.request.get('fixed'), u'percent': self.request.get('percent')}) + + def from_response(self): + return remove_nones_from_dict({u'fixed': self.request.get(u'fixed'), u'percent': self.request.get(u'percent')}) + + +class AutoscalerCpuutilization(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'utilizationTarget': self.request.get('utilization_target'), u'predictiveMethod': self.request.get('predictive_method')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'utilizationTarget': self.request.get(u'utilizationTarget'), u'predictiveMethod': self.request.get(u'predictiveMethod')} + ) + + +class AutoscalerCustommetricutilizationsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + {u'metric': item.get('metric'), u'utilizationTarget': item.get('utilization_target'), u'utilizationTargetType': item.get('utilization_target_type')} + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + {u'metric': item.get(u'metric'), u'utilizationTarget': 
item.get(u'utilizationTarget'), u'utilizationTargetType': item.get(u'utilizationTargetType')} + ) + + +class AutoscalerLoadbalancingutilization(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'utilizationTarget': self.request.get('utilization_target')}) + + def from_response(self): + return remove_nones_from_dict({u'utilizationTarget': self.request.get(u'utilizationTarget')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_autoscaler_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_autoscaler_info.py new file mode 100644 index 000000000..f8df9f4d5 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_autoscaler_info.py @@ -0,0 +1,374 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_autoscaler_info +description: +- Gather info for GCP Autoscaler +short_description: Gather info for GCP Autoscaler +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + zone: + description: + - URL of the zone where the instance group resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an autoscaler + gcp_compute_autoscaler_info: + zone: us-central1-a + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + id: + description: + - Unique identifier for the resource. + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + name: + description: + - Name of the resource. The name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. 
+ returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + autoscalingPolicy: + description: + - 'The configuration parameters for the autoscaling algorithm. You can define + one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, + and loadBalancingUtilization.' + - If none of these are specified, the default will be to autoscale based on + cpuUtilization to 0.6 or 60%. + returned: success + type: complex + contains: + minNumReplicas: + description: + - The minimum number of replicas that the autoscaler can scale down to. + This cannot be less than 0. If not provided, autoscaler will choose a + default value depending on maximum number of instances allowed. + returned: success + type: int + maxNumReplicas: + description: + - The maximum number of instances that the autoscaler can scale up to. This + is required when creating or updating an autoscaler. The maximum number + of replicas should not be lower than minimal number of replicas. + returned: success + type: int + coolDownPeriodSec: + description: + - The number of seconds that the autoscaler should wait before it starts + collecting information from a new instance. This prevents the autoscaler + from collecting information when the instance is initializing, during + which the collected usage would not be reliable. The default time autoscaler + waits is 60 seconds. + - Virtual machine initialization times might vary because of numerous factors. + We recommend that you test how long an instance may take to initialize. + To do this, create an instance and time the startup process. + returned: success + type: int + mode: + description: + - Defines operating mode for this policy. + returned: success + type: str + scaleInControl: + description: + - Defines scale in controls to reduce the risk of response latency and outages + due to abrupt scale-in events . 
+ returned: success + type: complex + contains: + maxScaledInReplicas: + description: + - A nested object resource. + returned: success + type: complex + contains: + fixed: + description: + - Specifies a fixed number of VM instances. This must be a positive + integer. + returned: success + type: int + percent: + description: + - Specifies a percentage of instances between 0 to 100%, inclusive. + - For example, specify 80 for 80%. + returned: success + type: int + timeWindowSec: + description: + - How long back autoscaling should look when computing recommendations + to include directives regarding slower scale down, as described above. + returned: success + type: int + cpuUtilization: + description: + - Defines the CPU utilization policy that allows the autoscaler to scale + based on the average CPU utilization of a managed instance group. + returned: success + type: complex + contains: + utilizationTarget: + description: + - The target CPU utilization that the autoscaler should maintain. + - Must be a float value in the range (0, 1]. If not specified, the default + is 0.6. + - If the CPU level is below the target utilization, the autoscaler scales + down the number of instances until it reaches the minimum number of + instances you specified or until the average CPU of your instances + reaches the target utilization. + - If the average CPU is above the target utilization, the autoscaler + scales up until it reaches the maximum number of instances you specified + or until the average utilization reaches the target utilization. + returned: success + type: str + predictiveMethod: + description: + - 'Indicates whether predictive autoscaling based on CPU metric is enabled. + Valid values are: - NONE (default). No predictive method is used. + The autoscaler scales the group to meet current demand based on real-time + metrics.' + - "- OPTIMIZE_AVAILABILITY. 
Predictive autoscaling improves availability + by monitoring daily and weekly load patterns and scaling out ahead + of anticipated demand." + returned: success + type: str + customMetricUtilizations: + description: + - Configuration parameters of autoscaling based on a custom metric. + returned: success + type: complex + contains: + metric: + description: + - The identifier (type) of the Stackdriver Monitoring metric. + - The metric cannot have negative values. + - The metric must have a value type of INT64 or DOUBLE. + returned: success + type: str + utilizationTarget: + description: + - The target value of the metric that autoscaler should maintain. This + must be a positive value. A utilization metric scales number of virtual + machines handling requests to increase or decrease proportionally + to the metric. + - For example, a good metric to use as a utilizationTarget is U(www.googleapis.com/compute/instance/network/received_bytes_count). + - The autoscaler will work to keep this value constant for each of the + instances. + returned: success + type: str + utilizationTargetType: + description: + - Defines how target utilization value is expressed for a Stackdriver + Monitoring metric. + returned: success + type: str + loadBalancingUtilization: + description: + - Configuration parameters of autoscaling based on a load balancer. + returned: success + type: complex + contains: + utilizationTarget: + description: + - Fraction of backend capacity utilization (set in HTTP(s) load balancing + configuration) that autoscaler should maintain. Must be a positive + float value. If not defined, the default is 0.8. + returned: success + type: str + target: + description: + - URL of the managed instance group that this autoscaler will scale. + returned: success + type: dict + zone: + description: + - URL of the zone where the instance group resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/autoscalers".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_bucket.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_bucket.py new file mode 100644 index 000000000..5746a0bab --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_bucket.py @@ -0,0 +1,637 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_backend_bucket +description: +- Backend buckets allow you to use Google Cloud Storage buckets with HTTP(S) load + balancing. +- An HTTP(S) load balancer can direct traffic to specified URLs to a backend bucket + rather than a backend service. It can send requests for static content to a Cloud + Storage bucket and requests for dynamic content to a virtual machine instance. +short_description: Creates a GCP BackendBucket +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + bucket_name: + description: + - Cloud Storage bucket name. + required: true + type: str + cdn_policy: + description: + - Cloud CDN configuration for this Backend Bucket. + required: false + type: dict + suboptions: + signed_url_cache_max_age_sec: + description: + - Maximum number of seconds the response to a signed URL request will be considered + fresh. After this time period, the response will be revalidated before being + served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: public, + max-age=[TTL]" header, regardless of any existing Cache-Control header. 
+ The actual headers served in responses will not be altered.' + required: false + type: int + default_ttl: + description: + - Specifies the default TTL for cached content served by this origin for responses + that do not have an existing valid TTL (max-age or s-max-age). + required: false + type: int + max_ttl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + required: false + type: int + client_ttl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + required: false + type: int + negative_caching: + description: + - Negative caching allows per-status code TTLs to be set, in order to apply + fine-grained caching for common errors or redirects. + required: false + type: bool + negative_caching_policy: + description: + - Sets a cache TTL for the specified HTTP status code. negativeCaching must + be enabled to configure negativeCachingPolicy. + - Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's + default cache TTLs. + elements: dict + required: false + type: list + suboptions: + code: + description: + - The HTTP status code to define a TTL against. Only HTTP status codes + 300, 301, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, + and you cannot specify a status code more than once. + required: false + type: int + ttl: + description: + - The TTL (in seconds) for which to cache responses with the corresponding + status code. The maximum allowed value is 1800s (30 minutes), noting + that infrequently accessed objects may be evicted from the cache before + the defined TTL. + required: false + type: int + cache_mode: + description: + - Specifies the cache setting for all responses from this backend. + - 'The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + .' 
+ - 'Some valid choices include: "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"' + required: false + type: str + serve_while_stale: + description: + - Serve existing content from the cache (if available) when revalidating content + with the origin, or when an error is encountered when refreshing the cache. + required: false + type: int + custom_response_headers: + description: + - Headers that the HTTP/S load balancer should add to proxied responses. + elements: str + required: false + type: list + description: + description: + - An optional textual description of the resource; provided by the client when + the resource is created. + required: false + type: str + enable_cdn: + description: + - If true, enable Cloud CDN for this BackendBucket. + required: false + type: bool + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/backendBuckets)' +- 'Using a Cloud Storage bucket as a load balancer backend: U(https://cloud.google.com/compute/docs/load-balancing/http/backend-bucket)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a bucket + google.cloud.gcp_storage_bucket: + name: bucket-backendbucket + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: bucket + +- name: create a backend bucket + google.cloud.gcp_compute_backend_bucket: + name: test_object + bucket_name: "{{ bucket.name }}" + description: A BackendBucket to connect LNB w/ Storage Bucket + enable_cdn: 'true' + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +bucketName: + description: + - Cloud Storage bucket name. + returned: success + type: str +cdnPolicy: + description: + - Cloud CDN configuration for this Backend Bucket. + returned: success + type: complex + contains: + signedUrlCacheMaxAgeSec: + description: + - Maximum number of seconds the response to a signed URL request will be considered + fresh. After this time period, the response will be revalidated before being + served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: public, + max-age=[TTL]" header, regardless of any existing Cache-Control header. The + actual headers served in responses will not be altered.' + returned: success + type: int + defaultTtl: + description: + - Specifies the default TTL for cached content served by this origin for responses + that do not have an existing valid TTL (max-age or s-max-age). + returned: success + type: int + maxTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + returned: success + type: int + clientTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. 
+ returned: success + type: int + negativeCaching: + description: + - Negative caching allows per-status code TTLs to be set, in order to apply + fine-grained caching for common errors or redirects. + returned: success + type: bool + negativeCachingPolicy: + description: + - Sets a cache TTL for the specified HTTP status code. negativeCaching must + be enabled to configure negativeCachingPolicy. + - Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's + default cache TTLs. + returned: success + type: complex + contains: + code: + description: + - The HTTP status code to define a TTL against. Only HTTP status codes 300, + 301, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, + and you cannot specify a status code more than once. + returned: success + type: int + ttl: + description: + - The TTL (in seconds) for which to cache responses with the corresponding + status code. The maximum allowed value is 1800s (30 minutes), noting that + infrequently accessed objects may be evicted from the cache before the + defined TTL. + returned: success + type: int + cacheMode: + description: + - Specifies the cache setting for all responses from this backend. + - 'The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + .' + returned: success + type: str + serveWhileStale: + description: + - Serve existing content from the cache (if available) when revalidating content + with the origin, or when an error is encountered when refreshing the cache. + returned: success + type: int +customResponseHeaders: + description: + - Headers that the HTTP/S load balancer should add to proxied responses. + returned: success + type: list +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional textual description of the resource; provided by the client when the + resource is created. 
  returned: success
  type: str
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
    navigate_hash,
    GcpSession,
    GcpModule,
    GcpRequest,
    remove_nones_from_dict,
    replace_resource_dict,
)
import json
import time

# NOTE(review): replace_resource_dict is imported but not referenced in this
# module; it is part of the standard Magic Modules import block.

################################################################################
# Main
################################################################################


def main():
    """Main function.

    Declarative entry point: reads the desired state from the module
    parameters, fetches the current BackendBucket (if any), then creates,
    updates or deletes the resource so GCP matches the requested state.
    Exits via module.exit_json with the resource body plus a 'changed' flag.
    """

    # Argument spec mirrors the GCP API fields (snake_case per Ansible
    # convention; converted to camelCase in resource_to_request()).
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            bucket_name=dict(required=True, type='str'),
            cdn_policy=dict(
                type='dict',
                options=dict(
                    signed_url_cache_max_age_sec=dict(type='int'),
                    default_ttl=dict(type='int'),
                    max_ttl=dict(type='int'),
                    client_ttl=dict(type='int'),
                    negative_caching=dict(type='bool'),
                    negative_caching_policy=dict(type='list', elements='dict', options=dict(code=dict(type='int'), ttl=dict(type='int'))),
                    cache_mode=dict(type='str'),
                    serve_while_stale=dict(type='int'),
                ),
            ),
            custom_response_headers=dict(type='list', elements='str'),
            description=dict(type='str'),
            enable_cdn=dict(type='bool'),
            name=dict(required=True, type='str'),
        )
    )

    # Default to the compute scope when the playbook supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#backendBucket'

    # Probe for an existing resource; falsy when absent.
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            # Exists and should exist: update only on detected drift.
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            # Exists but should not: delete it.
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            # Missing but should exist: create it.
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link, kind):
    # POST to the collection URL and block until the async operation finishes.
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


def update(module, link, kind):
    # Full-resource PUT to the self link (no partial PATCH used here).
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.put(link, resource_to_request(module)))


def delete(module, link, kind):
    # DELETE the self link and block until the async operation finishes.
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))


def resource_to_request(module):
    # Convert snake_case module params into the camelCase API request body,
    # dropping unset (falsy) keys but keeping an explicit False.
    request = {
        u'kind': 'compute#backendBucket',
        u'bucketName': module.params.get('bucket_name'),
        u'cdnPolicy': BackendBucketCdnpolicy(module.params.get('cdn_policy', {}), module).to_request(),
        u'customResponseHeaders': module.params.get('custom_response_headers'),
        u'description': module.params.get('description'),
        u'enableCdn': module.params.get('enable_cdn'),
        u'name': module.params.get('name'),
    }
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    # GET the resource; returns None on 404 when allow_not_found is True.
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    # Canonical URL of this specific backend bucket.
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/backendBuckets/{name}".format(**module.params)


def collection(module):
    # URL of the project-wide backendBuckets collection (used for create).
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/backendBuckets".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    # Decode an API response into a dict, failing the module on API errors.
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    # Compare desired (params) vs actual (API response), restricted to keys
    # both sides share, using GcpRequest's comparison semantics.
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Strip the API response down to the fields this module manages.

    Used for the drift comparison in is_different(); 'name' is taken from
    the module params rather than from the response.
    """
    return {
        u'bucketName': response.get(u'bucketName'),
        u'cdnPolicy': BackendBucketCdnpolicy(response.get(u'cdnPolicy', {}), module).from_response(),
        u'customResponseHeaders': response.get(u'customResponseHeaders'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'description': response.get(u'description'),
        u'enableCdn': response.get(u'enableCdn'),
        u'id': response.get(u'id'),
        u'name': module.params.get('name'),
    }


def async_op_url(module, extra_data=None):
    """Build the URL of a global compute operation; {op_id} comes from extra_data."""
    if extra_data is None:
        extra_data = {}
    url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    """Block until the operation in 'response' is DONE, then fetch its target resource."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#backendBucket')


def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until DONE, failing fast on errors.

    NOTE(review): there is no timeout; a permanently stuck operation polls forever.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module if the operation body carries an error list at err_path."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


class BackendBucketCdnpolicy(object):
    """Maps the cdn_policy param (snake_case) to/from the API's cdnPolicy (camelCase)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        # Ansible param names in, API field names out; None values dropped.
        return remove_nones_from_dict(
            {
                u'signedUrlCacheMaxAgeSec': self.request.get('signed_url_cache_max_age_sec'),
                u'defaultTtl': self.request.get('default_ttl'),
                u'maxTtl': self.request.get('max_ttl'),
                u'clientTtl': self.request.get('client_ttl'),
                u'negativeCaching': self.request.get('negative_caching'),
                u'negativeCachingPolicy': BackendBucketNegativecachingpolicyArray(self.request.get('negative_caching_policy', []), self.module).to_request(),
                u'cacheMode': self.request.get('cache_mode'),
                u'serveWhileStale': self.request.get('serve_while_stale'),
            }
        )

    def from_response(self):
        # API field names on both sides; None values dropped for comparison.
        return remove_nones_from_dict(
            {
                u'signedUrlCacheMaxAgeSec': self.request.get(u'signedUrlCacheMaxAgeSec'),
                u'defaultTtl': self.request.get(u'defaultTtl'),
                u'maxTtl': self.request.get(u'maxTtl'),
                u'clientTtl': self.request.get(u'clientTtl'),
                u'negativeCaching': self.request.get(u'negativeCaching'),
                u'negativeCachingPolicy': BackendBucketNegativecachingpolicyArray(self.request.get(u'negativeCachingPolicy', []), self.module).from_response(),
                u'cacheMode': self.request.get(u'cacheMode'),
                u'serveWhileStale': self.request.get(u'serveWhileStale'),
            }
        )


class BackendBucketNegativecachingpolicyArray(object):
    """Per-item snake_case/camelCase mapping for the negative_caching_policy list."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        # Ansible -> API direction, one dict per policy entry.
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        # API -> Ansible direction, one dict per policy entry.
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        # Keys happen to be identical in both representations for this type.
        return remove_nones_from_dict({u'code': item.get('code'), u'ttl': item.get('ttl')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'code': item.get(u'code'), u'ttl': item.get(u'ttl')})


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_bucket_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_bucket_info.py
new file
mode 100644 index 000000000..31d098398 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_bucket_info.py @@ -0,0 +1,306 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_backend_bucket_info +description: +- Gather info for GCP BackendBucket +short_description: Gather info for GCP BackendBucket +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a backend bucket + gcp_compute_backend_bucket_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + bucketName: + description: + - Cloud Storage bucket name. 
+ returned: success + type: str + cdnPolicy: + description: + - Cloud CDN configuration for this Backend Bucket. + returned: success + type: complex + contains: + signedUrlCacheMaxAgeSec: + description: + - Maximum number of seconds the response to a signed URL request will be + considered fresh. After this time period, the response will be revalidated + before being served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: + public, max-age=[TTL]" header, regardless of any existing Cache-Control + header. The actual headers served in responses will not be altered.' + returned: success + type: int + defaultTtl: + description: + - Specifies the default TTL for cached content served by this origin for + responses that do not have an existing valid TTL (max-age or s-max-age). + returned: success + type: int + maxTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + returned: success + type: int + clientTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + returned: success + type: int + negativeCaching: + description: + - Negative caching allows per-status code TTLs to be set, in order to apply + fine-grained caching for common errors or redirects. + returned: success + type: bool + negativeCachingPolicy: + description: + - Sets a cache TTL for the specified HTTP status code. negativeCaching must + be enabled to configure negativeCachingPolicy. + - Omitting the policy and leaving negativeCaching enabled will use Cloud + CDN's default cache TTLs. + returned: success + type: complex + contains: + code: + description: + - The HTTP status code to define a TTL against. Only HTTP status codes + 300, 301, 308, 404, 405, 410, 421, 451 and 501 can be specified as + values, and you cannot specify a status code more than once. 
+ returned: success + type: int + ttl: + description: + - The TTL (in seconds) for which to cache responses with the corresponding + status code. The maximum allowed value is 1800s (30 minutes), noting + that infrequently accessed objects may be evicted from the cache before + the defined TTL. + returned: success + type: int + cacheMode: + description: + - Specifies the cache setting for all responses from this backend. + - 'The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + .' + returned: success + type: str + serveWhileStale: + description: + - Serve existing content from the cache (if available) when revalidating + content with the origin, or when an error is encountered when refreshing + the cache. + returned: success + type: int + customResponseHeaders: + description: + - Headers that the HTTP/S load balancer should add to proxied responses. + returned: success + type: list + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional textual description of the resource; provided by the client when + the resource is created. + returned: success + type: str + enableCdn: + description: + - If true, enable Cloud CDN for this BackendBucket. + returned: success + type: bool + id: + description: + - Unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/backendBuckets".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_service.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_service.py new file mode 100644 index 000000000..a637a9e9b --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_backend_service.py @@ -0,0 +1,2268 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + +DOCUMENTATION = """ +--- +module: gcp_compute_backend_service +description: +- A Backend Service defines a group of virtual machines that will serve traffic for + load balancing. This resource is a global backend service, appropriate for external + load balancing or self-managed internal load balancing. +- For managed internal load balancing, use a regional backend service instead. +- Currently self-managed internal load balancing is only available in beta. +short_description: Creates a GCP BackendService +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + affinity_cookie_ttl_sec: + description: + - Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set + to 0, the cookie is non-persistent and lasts only until the end of the browser + session (or equivalent). The maximum allowed value for TTL is one day. + - When the load balancing scheme is INTERNAL, this field is not used. + required: false + type: int + backends: + description: + - The set of backends that serve this BackendService. + elements: dict + required: false + type: list + suboptions: + balancing_mode: + description: + - Specifies the balancing mode for this backend. 
+ - For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. + Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL). + - 'Some valid choices include: "UTILIZATION", "RATE", "CONNECTION"' + required: false + default: UTILIZATION + type: str + capacity_scaler: + description: + - A multiplier applied to the group's maximum servicing capacity (based on + UTILIZATION, RATE or CONNECTION). + - Default value is 1, which means the group will serve up to 100% of its configured + capacity (depending on balancingMode). A setting of 0 means the group is + completely drained, offering 0% of its available Capacity. Valid range is + [0.0,1.0]. + required: false + default: '1.0' + type: str + description: + description: + - An optional description of this resource. + - Provide this property when you create the resource. + required: false + type: str + group: + description: + - The fully-qualified URL of an Instance Group or Network Endpoint Group resource. + In case of instance group this defines the list of instances that serve + traffic. Member virtual machine instances from each instance group must + live in the same zone as the instance group itself. No two backends in a + backend service are allowed to use same Instance Group resource. + - For Network Endpoint Groups this defines list of endpoints. All endpoints + of Network Endpoint Group must be hosted on instances located in the same + zone as the Network Endpoint Group. + - Backend services cannot mix Instance Group and Network Endpoint Group backends. + - Note that you must specify an Instance Group or Network Endpoint Group resource + using the fully-qualified URL, rather than a partial URL. + required: true + type: str + max_connections: + description: + - The max number of simultaneous connections for the group. Can be used with + either CONNECTION or UTILIZATION balancing modes. 
+ - For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance + or maxConnectionsPerEndpoint, as appropriate for group type, must be set. + required: false + type: int + max_connections_per_instance: + description: + - The max number of simultaneous connections that a single backend instance + can handle. This is used to calculate the capacity of the group. Can be + used in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerInstance + must be set. + required: false + type: int + max_connections_per_endpoint: + description: + - The max number of simultaneous connections that a single backend network + endpoint can handle. This is used to calculate the capacity of the group. + Can be used in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint + must be set. + required: false + type: int + max_rate: + description: + - The max requests per second (RPS) of the group. + - Can be used with either RATE or UTILIZATION balancing modes, but required + if RATE mode. For RATE mode, either maxRate or one of maxRatePerInstance + or maxRatePerEndpoint, as appropriate for group type, must be set. + required: false + type: int + max_rate_per_instance: + description: + - The max requests per second (RPS) that a single backend instance can handle. + This is used to calculate the capacity of the group. Can be used in either + balancing mode. For RATE mode, either maxRate or maxRatePerInstance must + be set. + required: false + type: str + max_rate_per_endpoint: + description: + - The max requests per second (RPS) that a single backend network endpoint + can handle. This is used to calculate the capacity of the group. Can be + used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint + must be set. + required: false + type: str + max_utilization: + description: + - Used when balancingMode is UTILIZATION. 
This ratio defines the CPU utilization + target for the group. Valid range is [0.0, 1.0]. + required: false + type: str + circuit_breakers: + description: + - Settings controlling the volume of connections to a backend service. This field + is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + required: false + type: dict + suboptions: + max_requests_per_connection: + description: + - Maximum requests for a single backend connection. This parameter is respected + by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there + is no limit. Setting this parameter to 1 will effectively disable keep alive. + required: false + type: int + max_connections: + description: + - The maximum number of connections to the backend cluster. + - Defaults to 1024. + required: false + default: '1024' + type: int + max_pending_requests: + description: + - The maximum number of pending requests to the backend cluster. + - Defaults to 1024. + required: false + default: '1024' + type: int + max_requests: + description: + - The maximum number of parallel requests to the backend cluster. + - Defaults to 1024. + required: false + default: '1024' + type: int + max_retries: + description: + - The maximum number of parallel retries to the backend cluster. + - Defaults to 3. + required: false + default: '3' + type: int + consistent_hash: + description: + - Consistent Hash-based load balancing can be used to provide soft session affinity + based on HTTP headers, cookies or other properties. This load balancing policy + is applicable only for HTTP connections. The affinity to a particular destination + host will be lost when one or more hosts are added/removed from the destination + service. This field specifies parameters that control consistent hashing. This + field only applies if the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + This field is only applicable when locality_lb_policy is set to MAGLEV or RING_HASH. 
+ required: false + type: dict + suboptions: + http_cookie: + description: + - Hash is based on HTTP Cookie. This field describes a HTTP cookie that will + be used as the hash key for the consistent hash load balancer. If the cookie + is not present, it will be generated. + - This field is applicable if the sessionAffinity is set to HTTP_COOKIE. + required: false + type: dict + suboptions: + ttl: + description: + - Lifetime of the cookie. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. + - Must be from 0 to 315,576,000,000 inclusive. + required: true + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + name: + description: + - Name of the cookie. + required: false + type: str + path: + description: + - Path to set for the cookie. + required: false + type: str + http_header_name: + description: + - The hash based on the value of the specified header field. + - This field is applicable if the sessionAffinity is set to HEADER_FIELD. + required: false + type: str + minimum_ring_size: + description: + - The minimum number of virtual nodes to use for the hash ring. + - Larger ring sizes result in more granular load distributions. If the number + of hosts in the load balancing pool is larger than the ring size, each host + will be assigned a single virtual node. + - Defaults to 1024. + required: false + default: '1024' + type: int + cdn_policy: + description: + - Cloud CDN configuration for this BackendService. + required: false + type: dict + suboptions: + cache_key_policy: + description: + - The CacheKeyPolicy for this CdnPolicy. + required: false + type: dict + suboptions: + include_host: + description: + - If true requests to different hosts will be cached separately. 
+ required: false + type: bool + include_protocol: + description: + - If true, http and https requests will be cached separately. + required: false + type: bool + include_query_string: + description: + - If true, include query string parameters in the cache key according + to query_string_whitelist and query_string_blacklist. If neither is + set, the entire query string will be included. + - If false, the query string will be excluded from the cache key entirely. + required: false + type: bool + query_string_blacklist: + description: + - Names of query string parameters to exclude in cache keys. + - All other parameters will be included. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + elements: str + required: false + type: list + query_string_whitelist: + description: + - Names of query string parameters to include in cache keys. + - All other parameters will be excluded. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + elements: str + required: false + type: list + signed_url_cache_max_age_sec: + description: + - Maximum number of seconds the response to a signed URL request will be considered + fresh, defaults to 1hr (3600s). After this time period, the response will + be revalidated before being served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: public, + max-age=[TTL]" header, regardless of any existing Cache-Control header. + The actual headers served in responses will not be altered.' + required: false + default: '3600' + type: int + default_ttl: + description: + - Specifies the default TTL for cached content served by this origin for responses + that do not have an existing valid TTL (max-age or s-max-age). 
+ required: false + type: int + max_ttl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + required: false + type: int + client_ttl: + description: + - Specifies a separate maximum TTL for cached content served to the client (for + example, a browser client). This is used to clamp the max-age (or Expires) + value sent to the client. + required: false + type: int + negative_caching: + description: + - Negative caching allows per-status code TTLs to be set, in order to apply + fine-grained caching for common errors or redirects. + required: false + type: bool + negative_caching_policy: + description: + - Sets a cache TTL for the specified HTTP status code. negativeCaching must + be enabled to configure negativeCachingPolicy. + - Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's + default cache TTLs. + elements: dict + required: false + type: list + suboptions: + code: + description: + - The HTTP status code to define a TTL against. Only HTTP status codes + 300, 301, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, + and you cannot specify a status code more than once. + required: false + type: int + ttl: + description: + - The TTL (in seconds) for which to cache responses with the corresponding + status code. The maximum allowed value is 1800s (30 minutes), noting + that infrequently accessed objects may be evicted from the cache before + the defined TTL. + required: false + type: int + cache_mode: + description: + - Specifies the cache setting for all responses from this backend. + - 'The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + .' + - 'Some valid choices include: "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"' + required: false + type: str + serve_while_stale: + description: + - Serve existing content from the cache (if available) when revalidating content + with the origin, or when an error is encountered when refreshing the cache. 
+ required: false + type: int + connection_draining: + description: + - Settings for connection draining . + required: false + type: dict + suboptions: + draining_timeout_sec: + description: + - Time for which instance will be drained (not accept new connections, but + still work to finish started). + required: false + default: '300' + type: int + custom_request_headers: + description: + - Headers that the HTTP/S load balancer should add to proxied requests. + elements: str + required: false + type: list + custom_response_headers: + description: + - Headers that the HTTP/S load balancer should add to proxied responses. + elements: str + required: false + type: list + description: + description: + - An optional description of this resource. + required: false + type: str + enable_cdn: + description: + - If true, enable Cloud CDN for this BackendService. + required: false + type: bool + health_checks: + description: + - The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health + checking this BackendService. Currently at most one health check can be specified. + - A health check must be specified unless the backend service uses an internet + or serverless NEG as a backend. + - For internal load balancing, a URL to a HealthCheck resource must be specified + instead. + elements: str + required: false + type: list + iap: + description: + - Settings for enabling Cloud Identity Aware Proxy. + required: false + type: dict + suboptions: + enabled: + description: + - Enables IAP. + required: false + type: bool + oauth2_client_id: + description: + - OAuth2 Client ID for IAP . + required: true + type: str + oauth2_client_secret: + description: + - OAuth2 Client Secret for IAP . + required: true + type: str + load_balancing_scheme: + description: + - Indicates whether the backend service will be used with internal or external + load balancing. A backend service created for one type of load balancing cannot + be used with the other. 
+ - 'Some valid choices include: "EXTERNAL", "INTERNAL_SELF_MANAGED"' + required: false + default: EXTERNAL + type: str + locality_lb_policy: + description: + - The load balancing algorithm used within the scope of the locality. + - The possible values are - * ROUND_ROBIN - This is a simple policy in which each + healthy backend is selected in round robin order. + - "* LEAST_REQUEST - An O(1) algorithm which selects two random healthy hosts + and picks the host which has fewer active requests." + - "* RING_HASH - The ring/modulo hash load balancer implements consistent hashing + to backends. The algorithm has the property that the addition/removal of a host + from a set of N hosts only affects 1/N of the requests." + - "* RANDOM - The load balancer selects a random healthy host." + - "* ORIGINAL_DESTINATION - Backend host is selected based on the client connection + metadata, i.e., connections are opened to the same address as the destination + address of the incoming connection before the connection was redirected to the + load balancer." + - "* MAGLEV - used as a drop in replacement for the ring hash load balancer." + - Maglev is not as stable as ring hash but has faster table lookup build times + and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 + This field is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + - 'Some valid choices include: "ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", + "ORIGINAL_DESTINATION", "MAGLEV"' + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + outlier_detection: + description: + - Settings controlling eviction of unhealthy hosts from the load balancing pool. + - This field is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + required: false + type: dict + suboptions: + base_ejection_time: + description: + - The base time that a host is ejected for. The real time is equal to the + base time multiplied by the number of times the host has been ejected. Defaults + to 30000ms or 30s. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` field + and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. + required: false + type: int + consecutive_errors: + description: + - Number of errors before a host is ejected from the connection pool. When + the backend host is accessed over HTTP, a 5xx return code qualifies as an + error. + - Defaults to 5. + required: false + default: '5' + type: int + consecutive_gateway_failure: + description: + - The number of consecutive gateway failures (502, 503, 504 status or connection + errors that are mapped to one of those status codes) before a consecutive + gateway failure ejection occurs. Defaults to 5. 
+ required: false + default: '5' + type: int + enforcing_consecutive_errors: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive 5xx. This setting can be used to + disable ejection or to ramp it up slowly. Defaults to 100. + required: false + default: '100' + type: int + enforcing_consecutive_gateway_failure: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive gateway failures. This setting can + be used to disable ejection or to ramp it up slowly. Defaults to 0. + required: false + type: int + enforcing_success_rate: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through success rate statistics. This setting can be + used to disable ejection or to ramp it up slowly. Defaults to 100. + required: false + default: '100' + type: int + interval: + description: + - Time interval between ejection sweep analysis. This can result in both new + ejections as well as hosts being returned to service. Defaults to 10 seconds. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` field + and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. + required: false + type: int + max_ejection_percent: + description: + - Maximum percentage of hosts in the load balancing pool for the backend service + that can be ejected. Defaults to 10%. + required: false + default: '10' + type: int + success_rate_minimum_hosts: + description: + - The number of hosts in a cluster that must have enough request volume to + detect success rate outliers. 
If the number of hosts is less than this setting, + outlier detection via success rate statistics is not performed for any host + in the cluster. Defaults to 5. + required: false + default: '5' + type: int + success_rate_request_volume: + description: + - The minimum number of total requests that must be collected in one interval + (as defined by the interval duration above) to include this host in success + rate based outlier detection. If the volume is lower than this setting, + outlier detection via success rate statistics is not performed for that + host. Defaults to 100. + required: false + default: '100' + type: int + success_rate_stdev_factor: + description: + - 'This factor is used to determine the ejection threshold for success rate + outlier ejection. The ejection threshold is the difference between the mean + success rate, and the product of this factor and the standard deviation + of the mean success rate: mean - (stdev * success_rate_stdev_factor). This + factor is divided by a thousand to get a double. That is, if the desired + factor is 1.9, the runtime value should be 1900. Defaults to 1900.' + required: false + default: '1900' + type: int + port_name: + description: + - Name of backend port. The same name should appear in the instance groups referenced + by this service. Required when the load balancing scheme is EXTERNAL. + required: false + type: str + protocol: + description: + - The protocol this BackendService uses to communicate with backends. + - 'The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer + types and may result in errors if used with the GA API.' + - 'Some valid choices include: "HTTP", "HTTPS", "HTTP2", "TCP", "SSL", "GRPC"' + required: false + type: str + security_policy: + description: + - The security policy associated with this backend service. + required: false + type: str + session_affinity: + description: + - Type of session affinity to use. The default is NONE. 
Session affinity is not + applicable if the protocol is UDP. + - 'Some valid choices include: "NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", + "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE"' + required: false + type: str + timeout_sec: + description: + - How many seconds to wait for the backend before considering it a failed request. + Default is 30 seconds. Valid range is [1, 86400]. + required: false + type: int + aliases: + - timeout_seconds + log_config: + description: + - This field denotes the logging options for the load balancer traffic served + by this backend service. + - If logging is enabled, logs will be exported to Stackdriver. + required: false + type: dict + suboptions: + enable: + description: + - Whether to enable logging for the load balancer traffic served by this backend + service. + required: false + type: bool + sample_rate: + description: + - This field can only be specified if logging is enabled for this backend + service. The value of the field must be in [0, 1]. This configures the sampling + rate of requests to the load balancer where 1.0 means all logged requests + are reported and 0.0 means no logged requests are reported. + - The default value is 1.0. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/backendServices)' +- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/http/backend-service)' +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+""" + +EXAMPLES = """ +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: instancegroup-backendservice + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancegroup + +- name: create a HTTP health check + google.cloud.gcp_compute_http_health_check: + name: httphealthcheck-backendservice + healthy_threshold: 10 + port: 8080 + timeout_sec: 2 + unhealthy_threshold: 5 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a backend service + google.cloud.gcp_compute_backend_service: + name: test_object + backends: + - group: "{{ instancegroup.selfLink }}" + health_checks: + - "{{ healthcheck.selfLink }}" + enable_cdn: 'true' + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +""" + +RETURN = """ +affinityCookieTtlSec: + description: + - Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set + to 0, the cookie is non-persistent and lasts only until the end of the browser + session (or equivalent). The maximum allowed value for TTL is one day. + - When the load balancing scheme is INTERNAL, this field is not used. + returned: success + type: int +backends: + description: + - The set of backends that serve this BackendService. + returned: success + type: complex + contains: + balancingMode: + description: + - Specifies the balancing mode for this backend. + - For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. + Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL). + returned: success + type: str + capacityScaler: + description: + - A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, + RATE or CONNECTION). 
+ - Default value is 1, which means the group will serve up to 100% of its configured + capacity (depending on balancingMode). A setting of 0 means the group is completely + drained, offering 0% of its available capacity. Valid range is [0.0,1.0]. + returned: success + type: str + description: + description: + - An optional description of this resource. + - Provide this property when you create the resource. + returned: success + type: str + group: + description: + - The fully-qualified URL of an Instance Group or Network Endpoint Group resource. + In case of instance group this defines the list of instances that serve traffic. + Member virtual machine instances from each instance group must live in the + same zone as the instance group itself. No two backends in a backend service + are allowed to use the same Instance Group resource. + - For Network Endpoint Groups this defines the list of endpoints. All endpoints + of Network Endpoint Group must be hosted on instances located in the same + zone as the Network Endpoint Group. + - Backend services cannot mix Instance Group and Network Endpoint Group backends. + - Note that you must specify an Instance Group or Network Endpoint Group resource + using the fully-qualified URL, rather than a partial URL. + returned: success + type: str + maxConnections: + description: + - The max number of simultaneous connections for the group. Can be used with + either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance + or maxConnectionsPerEndpoint, as appropriate for group type, must be set. + returned: success + type: int + maxConnectionsPerInstance: + description: + - The max number of simultaneous connections that a single backend instance + can handle. This is used to calculate the capacity of the group. Can be used + in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must + be set. 
+ returned: success + type: int + maxConnectionsPerEndpoint: + description: + - The max number of simultaneous connections that a single backend network endpoint + can handle. This is used to calculate the capacity of the group. Can be used + in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint must + be set. + returned: success + type: int + maxRate: + description: + - The max requests per second (RPS) of the group. + - Can be used with either RATE or UTILIZATION balancing modes, but required + if RATE mode. For RATE mode, either maxRate or one of maxRatePerInstance or + maxRatePerEndpoint, as appropriate for group type, must be set. + returned: success + type: int + maxRatePerInstance: + description: + - The max requests per second (RPS) that a single backend instance can handle. + This is used to calculate the capacity of the group. Can be used in either + balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be + set. + returned: success + type: str + maxRatePerEndpoint: + description: + - The max requests per second (RPS) that a single backend network endpoint can + handle. This is used to calculate the capacity of the group. Can be used in + either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint + must be set. + returned: success + type: str + maxUtilization: + description: + - Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization + target for the group. Valid range is [0.0, 1.0]. + returned: success + type: str +circuitBreakers: + description: + - Settings controlling the volume of connections to a backend service. This field + is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + returned: success + type: complex + contains: + maxRequestsPerConnection: + description: + - Maximum requests for a single backend connection. 
This parameter is respected + by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there is + no limit. Setting this parameter to 1 will effectively disable keep alive. + returned: success + type: int + maxConnections: + description: + - The maximum number of connections to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxPendingRequests: + description: + - The maximum number of pending requests to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxRequests: + description: + - The maximum number of parallel requests to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxRetries: + description: + - The maximum number of parallel retries to the backend cluster. + - Defaults to 3. + returned: success + type: int +consistentHash: + description: + - Consistent Hash-based load balancing can be used to provide soft session affinity + based on HTTP headers, cookies or other properties. This load balancing policy + is applicable only for HTTP connections. The affinity to a particular destination + host will be lost when one or more hosts are added/removed from the destination + service. This field specifies parameters that control consistent hashing. This + field only applies if the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + This field is only applicable when locality_lb_policy is set to MAGLEV or RING_HASH. + returned: success + type: complex + contains: + httpCookie: + description: + - Hash is based on HTTP Cookie. This field describes a HTTP cookie that will + be used as the hash key for the consistent hash load balancer. If the cookie + is not present, it will be generated. + - This field is applicable if the sessionAffinity is set to HTTP_COOKIE. + returned: success + type: complex + contains: + ttl: + description: + - Lifetime of the cookie. 
+ returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. + - Must be from 0 to 315,576,000,000 inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds field + and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + name: + description: + - Name of the cookie. + returned: success + type: str + path: + description: + - Path to set for the cookie. + returned: success + type: str + httpHeaderName: + description: + - The hash based on the value of the specified header field. + - This field is applicable if the sessionAffinity is set to HEADER_FIELD. + returned: success + type: str + minimumRingSize: + description: + - The minimum number of virtual nodes to use for the hash ring. + - Larger ring sizes result in more granular load distributions. If the number + of hosts in the load balancing pool is larger than the ring size, each host + will be assigned a single virtual node. + - Defaults to 1024. + returned: success + type: int +cdnPolicy: + description: + - Cloud CDN configuration for this BackendService. + returned: success + type: complex + contains: + cacheKeyPolicy: + description: + - The CacheKeyPolicy for this CdnPolicy. + returned: success + type: complex + contains: + includeHost: + description: + - If true requests to different hosts will be cached separately. + returned: success + type: bool + includeProtocol: + description: + - If true, http and https requests will be cached separately. + returned: success + type: bool + includeQueryString: + description: + - If true, include query string parameters in the cache key according to + query_string_whitelist and query_string_blacklist. If neither is set, + the entire query string will be included. 
+ - If false, the query string will be excluded from the cache key entirely. + returned: success + type: bool + queryStringBlacklist: + description: + - Names of query string parameters to exclude in cache keys. + - All other parameters will be included. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + returned: success + type: list + queryStringWhitelist: + description: + - Names of query string parameters to include in cache keys. + - All other parameters will be excluded. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + returned: success + type: list + signedUrlCacheMaxAgeSec: + description: + - Maximum number of seconds the response to a signed URL request will be considered + fresh, defaults to 1hr (3600s). After this time period, the response will + be revalidated before being served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: public, + max-age=[TTL]" header, regardless of any existing Cache-Control header. The + actual headers served in responses will not be altered.' + returned: success + type: int + defaultTtl: + description: + - Specifies the default TTL for cached content served by this origin for responses + that do not have an existing valid TTL (max-age or s-max-age). + returned: success + type: int + maxTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + returned: success + type: int + clientTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + returned: success + type: int + negativeCaching: + description: + - Negative caching allows per-status code TTLs to be set, in order to apply + fine-grained caching for common errors or redirects. 
+ returned: success + type: bool + negativeCachingPolicy: + description: + - Sets a cache TTL for the specified HTTP status code. negativeCaching must + be enabled to configure negativeCachingPolicy. + - Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's + default cache TTLs. + returned: success + type: complex + contains: + code: + description: + - The HTTP status code to define a TTL against. Only HTTP status codes 300, + 301, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, + and you cannot specify a status code more than once. + returned: success + type: int + ttl: + description: + - The TTL (in seconds) for which to cache responses with the corresponding + status code. The maximum allowed value is 1800s (30 minutes), noting that + infrequently accessed objects may be evicted from the cache before the + defined TTL. + returned: success + type: int + cacheMode: + description: + - Specifies the cache setting for all responses from this backend. + - 'The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + .' + returned: success + type: str + serveWhileStale: + description: + - Serve existing content from the cache (if available) when revalidating content + with the origin, or when an error is encountered when refreshing the cache. + returned: success + type: int +connectionDraining: + description: + - Settings for connection draining . + returned: success + type: complex + contains: + drainingTimeoutSec: + description: + - Time for which instance will be drained (not accept new connections, but still + work to finish started). + returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +customRequestHeaders: + description: + - Headers that the HTTP/S load balancer should add to proxied requests. 
+ returned: success + type: list +customResponseHeaders: + description: + - Headers that the HTTP/S load balancer should add to proxied responses. + returned: success + type: list +fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. This + field is used in optimistic locking. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +enableCDN: + description: + - If true, enable Cloud CDN for this BackendService. + returned: success + type: bool +healthChecks: + description: + - The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health + checking this BackendService. Currently at most one health check can be specified. + - A health check must be specified unless the backend service uses an internet or + serverless NEG as a backend. + - For internal load balancing, a URL to a HealthCheck resource must be specified + instead. + returned: success + type: list +id: + description: + - The unique identifier for the resource. + returned: success + type: int +iap: + description: + - Settings for enabling Cloud Identity Aware Proxy. + returned: success + type: complex + contains: + enabled: + description: + - Enables IAP. + returned: success + type: bool + oauth2ClientId: + description: + - OAuth2 Client ID for IAP . + returned: success + type: str + oauth2ClientSecret: + description: + - OAuth2 Client Secret for IAP . + returned: success + type: str + oauth2ClientSecretSha256: + description: + - OAuth2 Client Secret SHA-256 for IAP . + returned: success + type: str +loadBalancingScheme: + description: + - Indicates whether the backend service will be used with internal or external load + balancing. A backend service created for one type of load balancing cannot be + used with the other. 
+ returned: success + type: str +localityLbPolicy: + description: + - The load balancing algorithm used within the scope of the locality. + - The possible values are - * ROUND_ROBIN - This is a simple policy in which each + healthy backend is selected in round robin order. + - "* LEAST_REQUEST - An O(1) algorithm which selects two random healthy hosts and + picks the host which has fewer active requests." + - "* RING_HASH - The ring/modulo hash load balancer implements consistent hashing + to backends. The algorithm has the property that the addition/removal of a host + from a set of N hosts only affects 1/N of the requests." + - "* RANDOM - The load balancer selects a random healthy host." + - "* ORIGINAL_DESTINATION - Backend host is selected based on the client connection + metadata, i.e., connections are opened to the same address as the destination + address of the incoming connection before the connection was redirected to the + load balancer." + - "* MAGLEV - used as a drop in replacement for the ring hash load balancer." + - Maglev is not as stable as ring hash but has faster table lookup build times and + host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 + This field is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + returned: success + type: str +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +outlierDetection: + description: + - Settings controlling eviction of unhealthy hosts from the load balancing pool. 
+ - This field is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + returned: success + type: complex + contains: + baseEjectionTime: + description: + - The base time that a host is ejected for. The real time is equal to the base + time multiplied by the number of times the host has been ejected. Defaults + to 30000ms or 30s. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + consecutiveErrors: + description: + - Number of errors before a host is ejected from the connection pool. When the + backend host is accessed over HTTP, a 5xx return code qualifies as an error. + - Defaults to 5. + returned: success + type: int + consecutiveGatewayFailure: + description: + - The number of consecutive gateway failures (502, 503, 504 status or connection + errors that are mapped to one of those status codes) before a consecutive + gateway failure ejection occurs. Defaults to 5. + returned: success + type: int + enforcingConsecutiveErrors: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive 5xx. This setting can be used to disable + ejection or to ramp it up slowly. Defaults to 100. + returned: success + type: int + enforcingConsecutiveGatewayFailure: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive gateway failures. This setting can + be used to disable ejection or to ramp it up slowly. Defaults to 0. 
+ returned: success + type: int + enforcingSuccessRate: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through success rate statistics. This setting can be used + to disable ejection or to ramp it up slowly. Defaults to 100. + returned: success + type: int + interval: + description: + - Time interval between ejection sweep analysis. This can result in both new + ejections as well as hosts being returned to service. Defaults to 10 seconds. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + maxEjectionPercent: + description: + - Maximum percentage of hosts in the load balancing pool for the backend service + that can be ejected. Defaults to 10%. + returned: success + type: int + successRateMinimumHosts: + description: + - The number of hosts in a cluster that must have enough request volume to detect + success rate outliers. If the number of hosts is less than this setting, outlier + detection via success rate statistics is not performed for any host in the + cluster. Defaults to 5. + returned: success + type: int + successRateRequestVolume: + description: + - The minimum number of total requests that must be collected in one interval + (as defined by the interval duration above) to include this host in success + rate based outlier detection. If the volume is lower than this setting, outlier + detection via success rate statistics is not performed for that host. Defaults + to 100. 
+ returned: success + type: int + successRateStdevFactor: + description: + - 'This factor is used to determine the ejection threshold for success rate + outlier ejection. The ejection threshold is the difference between the mean + success rate, and the product of this factor and the standard deviation of + the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor + is divided by a thousand to get a double. That is, if the desired factor is + 1.9, the runtime value should be 1900. Defaults to 1900.' + returned: success + type: int +portName: + description: + - Name of backend port. The same name should appear in the instance groups referenced + by this service. Required when the load balancing scheme is EXTERNAL. + returned: success + type: str +protocol: + description: + - The protocol this BackendService uses to communicate with backends. + - 'The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer + types and may result in errors if used with the GA API.' + returned: success + type: str +securityPolicy: + description: + - The security policy associated with this backend service. + returned: success + type: str +sessionAffinity: + description: + - Type of session affinity to use. The default is NONE. Session affinity is not + applicable if the protocol is UDP. + returned: success + type: str +timeoutSec: + description: + - How many seconds to wait for the backend before considering it a failed request. + Default is 30 seconds. Valid range is [1, 86400]. + returned: success + type: int +logConfig: + description: + - This field denotes the logging options for the load balancer traffic served by + this backend service. + - If logging is enabled, logs will be exported to Stackdriver. + returned: success + type: complex + contains: + enable: + description: + - Whether to enable logging for the load balancer traffic served by this backend + service. 
+ returned: success + type: bool + sampleRate: + description: + - This field can only be specified if logging is enabled for this backend service. + The value of the field must be in [0, 1]. This configures the sampling rate + of requests to the load balancer where 1.0 means all logged requests are reported + and 0.0 means no logged requests are reported. + - The default value is 1.0. + returned: success + type: str +""" + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "absent"], type="str"), + affinity_cookie_ttl_sec=dict(type="int"), + fingerprint=dict(type="str"), + backends=dict( + type="list", + elements="dict", + options=dict( + balancing_mode=dict(default="UTILIZATION", type="str"), + # TODO: capacity_scaler does some value normalization + # server-side, so there needs to be a way to do proper + # value comparison. 
+ capacity_scaler=dict(default="1", type="str"), + description=dict(type="str"), + group=dict(required=True, type="str"), + max_connections=dict(type="int"), + max_connections_per_instance=dict(type="int"), + max_connections_per_endpoint=dict(type="int"), + max_rate=dict(type="int"), + max_rate_per_instance=dict(type="str"), + max_rate_per_endpoint=dict(type="str"), + max_utilization=dict(type="str"), + ), + ), + circuit_breakers=dict( + type="dict", + options=dict( + max_requests_per_connection=dict(type="int"), + max_connections=dict(default=1024, type="int"), + max_pending_requests=dict(default=1024, type="int"), + max_requests=dict(default=1024, type="int"), + max_retries=dict(default=3, type="int"), + ), + ), + consistent_hash=dict( + type="dict", + options=dict( + http_cookie=dict( + type="dict", + options=dict( + ttl=dict( + type="dict", + options=dict( + seconds=dict(required=True, type="int"), + nanos=dict(type="int"), + ), + ), + name=dict(type="str"), + path=dict(type="str"), + ), + ), + http_header_name=dict(type="str"), + minimum_ring_size=dict(default=1024, type="int"), + ), + ), + cdn_policy=dict( + type="dict", + options=dict( + cache_key_policy=dict( + type="dict", + options=dict( + include_host=dict(type="bool"), + include_protocol=dict(type="bool"), + include_query_string=dict(type="bool"), + query_string_blacklist=dict(type="list", elements="str"), + query_string_whitelist=dict(type="list", elements="str"), + ), + ), + signed_url_cache_max_age_sec=dict(default=3600, type="int"), + default_ttl=dict(type="int"), + max_ttl=dict(type="int"), + client_ttl=dict(type="int"), + negative_caching=dict(type="bool"), + negative_caching_policy=dict( + type="list", + elements="dict", + options=dict(code=dict(type="int"), ttl=dict(type="int")), + ), + cache_mode=dict(type="str"), + serve_while_stale=dict(type="int"), + ), + ), + connection_draining=dict( + type="dict", + options=dict(draining_timeout_sec=dict(default=300, type="int")), + ), + 
custom_request_headers=dict(type="list", elements="str"), + custom_response_headers=dict(type="list", elements="str"), + description=dict(type="str"), + enable_cdn=dict(type="bool"), + health_checks=dict(type="list", elements="str"), + iap=dict( + type="dict", + options=dict( + enabled=dict(type="bool"), + oauth2_client_id=dict(required=True, type="str"), + oauth2_client_secret=dict(required=True, type="str", no_log=True), + ), + ), + load_balancing_scheme=dict(default="EXTERNAL", type="str"), + locality_lb_policy=dict(type="str"), + name=dict(required=True, type="str"), + outlier_detection=dict( + type="dict", + options=dict( + base_ejection_time=dict( + type="dict", + options=dict( + seconds=dict(required=True, type="int"), + nanos=dict(type="int"), + ), + ), + consecutive_errors=dict(default=5, type="int"), + consecutive_gateway_failure=dict(default=5, type="int"), + enforcing_consecutive_errors=dict(default=100, type="int"), + enforcing_consecutive_gateway_failure=dict(default=0, type="int"), + enforcing_success_rate=dict(default=100, type="int"), + interval=dict( + type="dict", + options=dict( + seconds=dict(required=True, type="int"), + nanos=dict(type="int"), + ), + ), + max_ejection_percent=dict(default=10, type="int"), + success_rate_minimum_hosts=dict(default=5, type="int"), + success_rate_request_volume=dict(default=100, type="int"), + success_rate_stdev_factor=dict(default=1900, type="int"), + ), + ), + port_name=dict(type="str"), + protocol=dict(type="str"), + security_policy=dict(type="str"), + session_affinity=dict(type="str"), + timeout_sec=dict(type="int", aliases=["timeout_seconds"]), + log_config=dict( + type="dict", + options=dict(enable=dict(type="bool"), sample_rate=dict(type="str")), + ), + ) + ) + + if not module.params["scopes"]: + module.params["scopes"] = ["https://www.googleapis.com/auth/compute"] + + state = module.params["state"] + kind = "compute#backendService" + + fetch = fetch_resource(module, self_link(module), kind) + changed = 
False + + if fetch: + module.params["fingerprint"] = fetch["fingerprint"] + if state == "present": + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == "present": + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({"changed": changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, "compute") + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, "compute") + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, "compute") + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + "kind": "compute#backendService", + "affinityCookieTtlSec": module.params.get("affinity_cookie_ttl_sec"), + "backends": BackendServiceBackendsArray( + module.params.get("backends", []), module + ).to_request(), + "circuitBreakers": BackendServiceCircuitbreakers( + module.params.get("circuit_breakers", {}), module + ).to_request(), + "consistentHash": BackendServiceConsistenthash( + module.params.get("consistent_hash", {}), module + ).to_request(), + "cdnPolicy": BackendServiceCdnpolicy( + module.params.get("cdn_policy", {}), module + ).to_request(), + "connectionDraining": BackendServiceConnectiondraining( + module.params.get("connection_draining", {}), module + ).to_request(), + "customRequestHeaders": module.params.get("custom_request_headers"), + "customResponseHeaders": module.params.get("custom_response_headers"), + "description": module.params.get("description"), + "enableCDN": module.params.get("enable_cdn"), + "healthChecks": 
module.params.get("health_checks"), + "iap": BackendServiceIap(module.params.get("iap", {}), module).to_request(), + "loadBalancingScheme": module.params.get("load_balancing_scheme"), + "localityLbPolicy": module.params.get("locality_lb_policy"), + "name": module.params.get("name"), + "outlierDetection": BackendServiceOutlierdetection( + module.params.get("outlier_detection", {}), module + ).to_request(), + "portName": module.params.get("port_name"), + "protocol": module.params.get("protocol"), + "securityPolicy": module.params.get("security_policy"), + "sessionAffinity": module.params.get("session_affinity"), + "timeoutSec": module.params.get("timeout_sec"), + "logConfig": BackendServiceLogconfig( + module.params.get("log_config", {}), module + ).to_request(), + "fingerprint": module.params.get("fingerprint"), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, "compute") + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/backendServices/{name}".format( + **module.params + ) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/backendServices".format( + **module.params + ) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, "JSONDecodeError", ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ["error", "errors"]): + module.fail_json(msg=navigate_hash(result, ["error", "errors"])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + # req = GcpRequest(request_vals) + # res = GcpRequest(response_vals) + # import epdb; epdb.serve() + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + "affinityCookieTtlSec": response.get("affinityCookieTtlSec"), + "backends": BackendServiceBackendsArray( + response.get("backends", []), module + ).from_response(), + "circuitBreakers": BackendServiceCircuitbreakers( + response.get("circuitBreakers", {}), module + ).from_response(), + "consistentHash": BackendServiceConsistenthash( + response.get("consistentHash", {}), module + ).from_response(), + "cdnPolicy": BackendServiceCdnpolicy( + response.get("cdnPolicy", {}), module + ).from_response(), + "connectionDraining": BackendServiceConnectiondraining( + response.get("connectionDraining", {}), module + ).from_response(), + "creationTimestamp": response.get("creationTimestamp"), + "customRequestHeaders": response.get("customRequestHeaders"), + "customResponseHeaders": response.get("customResponseHeaders"), + "fingerprint": response.get("fingerprint"), + "description": response.get("description"), + "enableCDN": response.get("enableCDN"), + "healthChecks": response.get("healthChecks"), + "id": response.get("id"), + "iap": BackendServiceIap(response.get("iap", {}), module).from_response(), + "loadBalancingScheme": module.params.get("load_balancing_scheme"), + "localityLbPolicy": response.get("localityLbPolicy"), + "name": module.params.get("name"), + "outlierDetection": BackendServiceOutlierdetection( + response.get("outlierDetection", {}), module + ).from_response(), + "portName": response.get("portName"), + "protocol": response.get("protocol"), + "securityPolicy": response.get("securityPolicy"), + "sessionAffinity": response.get("sessionAffinity"), + "timeoutSec": response.get("timeoutSec"), + "logConfig": BackendServiceLogconfig( + response.get("logConfig", {}), module + ).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + 
combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, "compute#operation") + if op_result is None: + return {} + status = navigate_hash(op_result, ["status"]) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource( + module, navigate_hash(wait_done, ["targetLink"]), "compute#backendService" + ) + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ["name"]) + op_uri = async_op_url(module, {"op_id": op_id}) + while status != "DONE": + raise_if_errors(op_result, ["error", "errors"], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, "compute#operation", False) + status = navigate_hash(op_result, ["status"]) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class BackendServiceBackendsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + "balancingMode": item.get("balancing_mode"), + "capacityScaler": item.get("capacity_scaler"), + "description": item.get("description"), + "group": item.get("group"), + "maxConnections": item.get("max_connections"), + "maxConnectionsPerInstance": item.get("max_connections_per_instance"), + "maxConnectionsPerEndpoint": item.get("max_connections_per_endpoint"), + "maxRate": item.get("max_rate"), + "maxRatePerInstance": item.get("max_rate_per_instance"), + "maxRatePerEndpoint": 
item.get("max_rate_per_endpoint"), + "maxUtilization": item.get("max_utilization"), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + "balancingMode": item.get("balancingMode"), + "capacityScaler": item.get("capacityScaler"), + "description": item.get("description"), + "group": item.get("group"), + "maxConnections": item.get("maxConnections"), + "maxConnectionsPerInstance": item.get("maxConnectionsPerInstance"), + "maxConnectionsPerEndpoint": item.get("maxConnectionsPerEndpoint"), + "maxRate": item.get("maxRate"), + "maxRatePerInstance": item.get("maxRatePerInstance"), + "maxRatePerEndpoint": item.get("maxRatePerEndpoint"), + "maxUtilization": item.get("maxUtilization"), + } + ) + + +class BackendServiceCircuitbreakers(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + "maxRequestsPerConnection": self.request.get( + "max_requests_per_connection" + ), + "maxConnections": self.request.get("max_connections"), + "maxPendingRequests": self.request.get("max_pending_requests"), + "maxRequests": self.request.get("max_requests"), + "maxRetries": self.request.get("max_retries"), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + "maxRequestsPerConnection": self.request.get( + "maxRequestsPerConnection" + ), + "maxConnections": self.request.get("maxConnections"), + "maxPendingRequests": self.request.get("maxPendingRequests"), + "maxRequests": self.request.get("maxRequests"), + "maxRetries": self.request.get("maxRetries"), + } + ) + + +class BackendServiceConsistenthash(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + "httpCookie": BackendServiceHttpcookie( + self.request.get("http_cookie", {}), self.module + 
).to_request(), + "httpHeaderName": self.request.get("http_header_name"), + "minimumRingSize": self.request.get("minimum_ring_size"), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + "httpCookie": BackendServiceHttpcookie( + self.request.get("httpCookie", {}), self.module + ).from_response(), + "httpHeaderName": self.request.get("httpHeaderName"), + "minimumRingSize": self.request.get("minimumRingSize"), + } + ) + + +class BackendServiceHttpcookie(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + "ttl": BackendServiceTtl( + self.request.get("ttl", {}), self.module + ).to_request(), + "name": self.request.get("name"), + "path": self.request.get("path"), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + "ttl": BackendServiceTtl( + self.request.get("ttl", {}), self.module + ).from_response(), + "name": self.request.get("name"), + "path": self.request.get("path"), + } + ) + + +class BackendServiceTtl(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {"seconds": self.request.get("seconds"), "nanos": self.request.get("nanos")} + ) + + def from_response(self): + return remove_nones_from_dict( + {"seconds": self.request.get("seconds"), "nanos": self.request.get("nanos")} + ) + + +class BackendServiceCdnpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + "cacheKeyPolicy": BackendServiceCachekeypolicy( + self.request.get("cache_key_policy", {}), self.module + ).to_request(), + "signedUrlCacheMaxAgeSec": self.request.get( + "signed_url_cache_max_age_sec" + ), + 
"defaultTtl": self.request.get("default_ttl"), + "maxTtl": self.request.get("max_ttl"), + "clientTtl": self.request.get("client_ttl"), + "negativeCaching": self.request.get("negative_caching"), + "negativeCachingPolicy": BackendServiceNegativecachingpolicyArray( + self.request.get("negative_caching_policy", []), self.module + ).to_request(), + "cacheMode": self.request.get("cache_mode"), + "serveWhileStale": self.request.get("serve_while_stale"), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + "cacheKeyPolicy": BackendServiceCachekeypolicy( + self.request.get("cacheKeyPolicy", {}), self.module + ).from_response(), + "signedUrlCacheMaxAgeSec": self.request.get("signedUrlCacheMaxAgeSec"), + "defaultTtl": self.request.get("defaultTtl"), + "maxTtl": self.request.get("maxTtl"), + "clientTtl": self.request.get("clientTtl"), + "negativeCaching": self.request.get("negativeCaching"), + "negativeCachingPolicy": BackendServiceNegativecachingpolicyArray( + self.request.get("negativeCachingPolicy", []), self.module + ).from_response(), + "cacheMode": self.request.get("cacheMode"), + "serveWhileStale": self.request.get("serveWhileStale"), + } + ) + + +class BackendServiceCachekeypolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + "includeHost": self.request.get("include_host"), + "includeProtocol": self.request.get("include_protocol"), + "includeQueryString": self.request.get("include_query_string"), + "queryStringBlacklist": self.request.get("query_string_blacklist"), + "queryStringWhitelist": self.request.get("query_string_whitelist"), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + "includeHost": self.request.get("includeHost"), + "includeProtocol": self.request.get("includeProtocol"), + "includeQueryString": self.request.get("includeQueryString"), + 
"queryStringBlacklist": self.request.get("queryStringBlacklist"), + "queryStringWhitelist": self.request.get("queryStringWhitelist"), + } + ) + + +class BackendServiceNegativecachingpolicyArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + {"code": item.get("code"), "ttl": item.get("ttl")} + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + {"code": item.get("code"), "ttl": item.get("ttl")} + ) + + +class BackendServiceConnectiondraining(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {"drainingTimeoutSec": self.request.get("draining_timeout_sec")} + ) + + def from_response(self): + return remove_nones_from_dict( + {"drainingTimeoutSec": self.request.get("drainingTimeoutSec")} + ) + + +class BackendServiceIap(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + "enabled": self.request.get("enabled"), + "oauth2ClientId": self.request.get("oauth2_client_id"), + "oauth2ClientSecret": self.request.get("oauth2_client_secret"), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + "enabled": self.request.get("enabled"), + "oauth2ClientId": self.request.get("oauth2ClientId"), + "oauth2ClientSecret": self.request.get("oauth2ClientSecret"), + } + ) + + +class BackendServiceOutlierdetection(object): + def 
class BackendServiceOutlierdetection(object):
    """Maps the outlierDetection sub-object between Ansible and API wire format."""

    def __init__(self, request, module):
        self.module = module
        # A missing/None payload is treated as an empty mapping.
        self.request = request or {}

    def to_request(self):
        # Ansible snake_case -> API camelCase; nested Duration objects are
        # converted by their own mapper classes.
        payload = {
            "baseEjectionTime": BackendServiceBaseejectiontime(
                self.request.get("base_ejection_time", {}), self.module
            ).to_request(),
            "consecutiveErrors": self.request.get("consecutive_errors"),
            "consecutiveGatewayFailure": self.request.get("consecutive_gateway_failure"),
            "enforcingConsecutiveErrors": self.request.get("enforcing_consecutive_errors"),
            "enforcingConsecutiveGatewayFailure": self.request.get(
                "enforcing_consecutive_gateway_failure"
            ),
            "enforcingSuccessRate": self.request.get("enforcing_success_rate"),
            "interval": BackendServiceInterval(
                self.request.get("interval", {}), self.module
            ).to_request(),
            "maxEjectionPercent": self.request.get("max_ejection_percent"),
            "successRateMinimumHosts": self.request.get("success_rate_minimum_hosts"),
            "successRateRequestVolume": self.request.get("success_rate_request_volume"),
            "successRateStdevFactor": self.request.get("success_rate_stdev_factor"),
        }
        return remove_nones_from_dict(payload)

    def from_response(self):
        # API responses already use camelCase keys.
        payload = {
            "baseEjectionTime": BackendServiceBaseejectiontime(
                self.request.get("baseEjectionTime", {}), self.module
            ).from_response(),
            "consecutiveErrors": self.request.get("consecutiveErrors"),
            "consecutiveGatewayFailure": self.request.get("consecutiveGatewayFailure"),
            "enforcingConsecutiveErrors": self.request.get("enforcingConsecutiveErrors"),
            "enforcingConsecutiveGatewayFailure": self.request.get(
                "enforcingConsecutiveGatewayFailure"
            ),
            "enforcingSuccessRate": self.request.get("enforcingSuccessRate"),
            "interval": BackendServiceInterval(
                self.request.get("interval", {}), self.module
            ).from_response(),
            "maxEjectionPercent": self.request.get("maxEjectionPercent"),
            "successRateMinimumHosts": self.request.get("successRateMinimumHosts"),
            "successRateRequestVolume": self.request.get("successRateRequestVolume"),
            "successRateStdevFactor": self.request.get("successRateStdevFactor"),
        }
        return remove_nones_from_dict(payload)


class BackendServiceBaseejectiontime(object):
    """Maps a Duration ({seconds, nanos}) sub-object; keys are identical both ways."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {"seconds": self.request.get("seconds"), "nanos": self.request.get("nanos")}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {"seconds": self.request.get("seconds"), "nanos": self.request.get("nanos")}
        )


class BackendServiceInterval(object):
    """Maps a Duration ({seconds, nanos}) sub-object; keys are identical both ways."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {"seconds": self.request.get("seconds"), "nanos": self.request.get("nanos")}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {"seconds": self.request.get("seconds"), "nanos": self.request.get("nanos")}
        )


class BackendServiceLogconfig(object):
    """Maps the logConfig sub-object between Ansible and API wire format."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Ansible snake_case -> API camelCase.
        return remove_nones_from_dict(
            {
                "enable": self.request.get("enable"),
                "sampleRate": self.request.get("sample_rate"),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                "enable": self.request.get("enable"),
                "sampleRate": self.request.get("sampleRate"),
            }
        )


if __name__ == "__main__":
    main()
-*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_backend_service_info +description: +- Gather info for GCP BackendService +short_description: Gather info for GCP BackendService +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a backend service + gcp_compute_backend_service_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + affinityCookieTtlSec: + description: + - Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If + set to 0, the cookie is non-persistent and lasts only until the end of the + browser session (or equivalent). The maximum allowed value for TTL is one + day. + - When the load balancing scheme is INTERNAL, this field is not used. + returned: success + type: int + backends: + description: + - The set of backends that serve this BackendService. + returned: success + type: complex + contains: + balancingMode: + description: + - Specifies the balancing mode for this backend. + - For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. + Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL). + returned: success + type: str + capacityScaler: + description: + - A multiplier applied to the group's maximum servicing capacity (based + on UTILIZATION, RATE or CONNECTION). + - Default value is 1, which means the group will serve up to 100% of its + configured capacity (depending on balancingMode). A setting of 0 means + the group is completely drained, offering 0% of its available Capacity. + Valid range is [0.0,1.0]. + returned: success + type: str + description: + description: + - An optional description of this resource. + - Provide this property when you create the resource. + returned: success + type: str + group: + description: + - The fully-qualified URL of an Instance Group or Network Endpoint Group + resource. In case of instance group this defines the list of instances + that serve traffic. 
Member virtual machine instances from each instance + group must live in the same zone as the instance group itself. No two + backends in a backend service are allowed to use same Instance Group resource. + - For Network Endpoint Groups this defines list of endpoints. All endpoints + of Network Endpoint Group must be hosted on instances located in the same + zone as the Network Endpoint Group. + - Backend services cannot mix Instance Group and Network Endpoint Group + backends. + - Note that you must specify an Instance Group or Network Endpoint Group + resource using the fully-qualified URL, rather than a partial URL. + returned: success + type: str + maxConnections: + description: + - The max number of simultaneous connections for the group. Can be used + with either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance + or maxConnectionsPerEndpoint, as appropriate for group type, must be set. + returned: success + type: int + maxConnectionsPerInstance: + description: + - The max number of simultaneous connections that a single backend instance + can handle. This is used to calculate the capacity of the group. Can be + used in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerInstance + must be set. + returned: success + type: int + maxConnectionsPerEndpoint: + description: + - The max number of simultaneous connections that a single backend network + endpoint can handle. This is used to calculate the capacity of the group. + Can be used in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint + must be set. + returned: success + type: int + maxRate: + description: + - The max requests per second (RPS) of the group. + - Can be used with either RATE or UTILIZATION balancing modes, but required + if RATE mode. 
For RATE mode, either maxRate or one of maxRatePerInstance + or maxRatePerEndpoint, as appropriate for group type, must be set. + returned: success + type: int + maxRatePerInstance: + description: + - The max requests per second (RPS) that a single backend instance can handle. + This is used to calculate the capacity of the group. Can be used in either + balancing mode. For RATE mode, either maxRate or maxRatePerInstance must + be set. + returned: success + type: str + maxRatePerEndpoint: + description: + - The max requests per second (RPS) that a single backend network endpoint + can handle. This is used to calculate the capacity of the group. Can be + used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint + must be set. + returned: success + type: str + maxUtilization: + description: + - Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization + target for the group. Valid range is [0.0, 1.0]. + returned: success + type: str + circuitBreakers: + description: + - Settings controlling the volume of connections to a backend service. This + field is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + returned: success + type: complex + contains: + maxRequestsPerConnection: + description: + - Maximum requests for a single backend connection. This parameter is respected + by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there + is no limit. Setting this parameter to 1 will effectively disable keep + alive. + returned: success + type: int + maxConnections: + description: + - The maximum number of connections to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxPendingRequests: + description: + - The maximum number of pending requests to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxRequests: + description: + - The maximum number of parallel requests to the backend cluster. + - Defaults to 1024. 
+ returned: success + type: int + maxRetries: + description: + - The maximum number of parallel retries to the backend cluster. + - Defaults to 3. + returned: success + type: int + consistentHash: + description: + - Consistent Hash-based load balancing can be used to provide soft session affinity + based on HTTP headers, cookies or other properties. This load balancing policy + is applicable only for HTTP connections. The affinity to a particular destination + host will be lost when one or more hosts are added/removed from the destination + service. This field specifies parameters that control consistent hashing. + This field only applies if the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + This field is only applicable when locality_lb_policy is set to MAGLEV or + RING_HASH. + returned: success + type: complex + contains: + httpCookie: + description: + - Hash is based on HTTP Cookie. This field describes a HTTP cookie that + will be used as the hash key for the consistent hash load balancer. If + the cookie is not present, it will be generated. + - This field is applicable if the sessionAffinity is set to HTTP_COOKIE. + returned: success + type: complex + contains: + ttl: + description: + - Lifetime of the cookie. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. + - Must be from 0 to 315,576,000,000 inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + name: + description: + - Name of the cookie. + returned: success + type: str + path: + description: + - Path to set for the cookie. + returned: success + type: str + httpHeaderName: + description: + - The hash based on the value of the specified header field. 
+ - This field is applicable if the sessionAffinity is set to HEADER_FIELD. + returned: success + type: str + minimumRingSize: + description: + - The minimum number of virtual nodes to use for the hash ring. + - Larger ring sizes result in more granular load distributions. If the number + of hosts in the load balancing pool is larger than the ring size, each + host will be assigned a single virtual node. + - Defaults to 1024. + returned: success + type: int + cdnPolicy: + description: + - Cloud CDN configuration for this BackendService. + returned: success + type: complex + contains: + cacheKeyPolicy: + description: + - The CacheKeyPolicy for this CdnPolicy. + returned: success + type: complex + contains: + includeHost: + description: + - If true requests to different hosts will be cached separately. + returned: success + type: bool + includeProtocol: + description: + - If true, http and https requests will be cached separately. + returned: success + type: bool + includeQueryString: + description: + - If true, include query string parameters in the cache key according + to query_string_whitelist and query_string_blacklist. If neither is + set, the entire query string will be included. + - If false, the query string will be excluded from the cache key entirely. + returned: success + type: bool + queryStringBlacklist: + description: + - Names of query string parameters to exclude in cache keys. + - All other parameters will be included. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + returned: success + type: list + queryStringWhitelist: + description: + - Names of query string parameters to include in cache keys. + - All other parameters will be excluded. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." 
+ returned: success + type: list + signedUrlCacheMaxAgeSec: + description: + - Maximum number of seconds the response to a signed URL request will be + considered fresh, defaults to 1hr (3600s). After this time period, the + response will be revalidated before being served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: + public, max-age=[TTL]" header, regardless of any existing Cache-Control + header. The actual headers served in responses will not be altered.' + returned: success + type: int + defaultTtl: + description: + - Specifies the default TTL for cached content served by this origin for + responses that do not have an existing valid TTL (max-age or s-max-age). + returned: success + type: int + maxTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + returned: success + type: int + clientTtl: + description: + - Specifies a separate client (e.g. browser client) TTL, separate from the + TTL used by Cloud CDN's edge caches. + returned: success + type: int + negativeCaching: + description: + - Negative caching allows per-status code TTLs to be set, in order to apply + fine-grained caching for common errors or redirects. + returned: success + type: bool + negativeCachingPolicy: + description: + - Sets a cache TTL for the specified HTTP status code. negativeCaching must + be enabled to configure negativeCachingPolicy. + - Omitting the policy and leaving negativeCaching enabled will use Cloud + CDN's default cache TTLs. + returned: success + type: complex + contains: + code: + description: + - The HTTP status code to define a TTL against. Only HTTP status codes + 300, 301, 308, 404, 405, 410, 421, 451 and 501 can be specified as + values, and you cannot specify a status code more than once. + returned: success + type: int + ttl: + description: + - The TTL (in seconds) for which to cache responses with the corresponding + status code. 
The maximum allowed value is 1800s (30 minutes), noting + that infrequently accessed objects may be evicted from the cache before + the defined TTL. + returned: success + type: int + cacheMode: + description: + - Specifies the cache setting for all responses from this backend. + - 'The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + .' + returned: success + type: str + serveWhileStale: + description: + - Serve existing content from the cache (if available) when revalidating + content with the origin, or when an error is encountered when refreshing + the cache. + returned: success + type: int + connectionDraining: + description: + - Settings for connection draining . + returned: success + type: complex + contains: + drainingTimeoutSec: + description: + - Time for which instance will be drained (not accept new connections, but + still work to finish started). + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + customRequestHeaders: + description: + - Headers that the HTTP/S load balancer should add to proxied requests. + returned: success + type: list + customResponseHeaders: + description: + - Headers that the HTTP/S load balancer should add to proxied responses. + returned: success + type: list + fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. + This field is used in optimistic locking. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + enableCDN: + description: + - If true, enable Cloud CDN for this BackendService. + returned: success + type: bool + healthChecks: + description: + - The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health + checking this BackendService. Currently at most one health check can be specified. 
+ - A health check must be specified unless the backend service uses an internet + or serverless NEG as a backend. + - For internal load balancing, a URL to a HealthCheck resource must be specified + instead. + returned: success + type: list + id: + description: + - The unique identifier for the resource. + returned: success + type: int + iap: + description: + - Settings for enabling Cloud Identity Aware Proxy. + returned: success + type: complex + contains: + enabled: + description: + - Enables IAP. + returned: success + type: bool + oauth2ClientId: + description: + - OAuth2 Client ID for IAP . + returned: success + type: str + oauth2ClientSecret: + description: + - OAuth2 Client Secret for IAP . + returned: success + type: str + oauth2ClientSecretSha256: + description: + - OAuth2 Client Secret SHA-256 for IAP . + returned: success + type: str + loadBalancingScheme: + description: + - Indicates whether the backend service will be used with internal or external + load balancing. A backend service created for one type of load balancing cannot + be used with the other. + returned: success + type: str + localityLbPolicy: + description: + - The load balancing algorithm used within the scope of the locality. + - The possible values are - * ROUND_ROBIN - This is a simple policy in which + each healthy backend is selected in round robin order. + - "* LEAST_REQUEST - An O(1) algorithm which selects two random healthy hosts + and picks the host which has fewer active requests." + - "* RING_HASH - The ring/modulo hash load balancer implements consistent hashing + to backends. The algorithm has the property that the addition/removal of a + host from a set of N hosts only affects 1/N of the requests." + - "* RANDOM - The load balancer selects a random healthy host." 
+ - "* ORIGINAL_DESTINATION - Backend host is selected based on the client connection + metadata, i.e., connections are opened to the same address as the destination + address of the incoming connection before the connection was redirected to + the load balancer." + - "* MAGLEV - used as a drop in replacement for the ring hash load balancer." + - Maglev is not as stable as ring hash but has faster table lookup build times + and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 + This field is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + outlierDetection: + description: + - Settings controlling eviction of unhealthy hosts from the load balancing pool. + - This field is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + returned: success + type: complex + contains: + baseEjectionTime: + description: + - The base time that a host is ejected for. The real time is equal to the + base time multiplied by the number of times the host has been ejected. + Defaults to 30000ms or 30s. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. 
+ Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + consecutiveErrors: + description: + - Number of errors before a host is ejected from the connection pool. When + the backend host is accessed over HTTP, a 5xx return code qualifies as + an error. + - Defaults to 5. + returned: success + type: int + consecutiveGatewayFailure: + description: + - The number of consecutive gateway failures (502, 503, 504 status or connection + errors that are mapped to one of those status codes) before a consecutive + gateway failure ejection occurs. Defaults to 5. + returned: success + type: int + enforcingConsecutiveErrors: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive 5xx. This setting can be used to + disable ejection or to ramp it up slowly. Defaults to 100. + returned: success + type: int + enforcingConsecutiveGatewayFailure: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive gateway failures. This setting + can be used to disable ejection or to ramp it up slowly. Defaults to 0. + returned: success + type: int + enforcingSuccessRate: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through success rate statistics. This setting can be + used to disable ejection or to ramp it up slowly. Defaults to 100. + returned: success + type: int + interval: + description: + - Time interval between ejection sweep analysis. This can result in both + new ejections as well as hosts being returned to service. Defaults to + 10 seconds. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. 
+ returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + maxEjectionPercent: + description: + - Maximum percentage of hosts in the load balancing pool for the backend + service that can be ejected. Defaults to 10%. + returned: success + type: int + successRateMinimumHosts: + description: + - The number of hosts in a cluster that must have enough request volume + to detect success rate outliers. If the number of hosts is less than this + setting, outlier detection via success rate statistics is not performed + for any host in the cluster. Defaults to 5. + returned: success + type: int + successRateRequestVolume: + description: + - The minimum number of total requests that must be collected in one interval + (as defined by the interval duration above) to include this host in success + rate based outlier detection. If the volume is lower than this setting, + outlier detection via success rate statistics is not performed for that + host. Defaults to 100. + returned: success + type: int + successRateStdevFactor: + description: + - 'This factor is used to determine the ejection threshold for success rate + outlier ejection. The ejection threshold is the difference between the + mean success rate, and the product of this factor and the standard deviation + of the mean success rate: mean - (stdev * success_rate_stdev_factor). + This factor is divided by a thousand to get a double. That is, if the + desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.' + returned: success + type: int + portName: + description: + - Name of backend port. The same name should appear in the instance groups referenced + by this service. Required when the load balancing scheme is EXTERNAL. 
+ returned: success + type: str + protocol: + description: + - The protocol this BackendService uses to communicate with backends. + - 'The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer + types and may result in errors if used with the GA API.' + returned: success + type: str + securityPolicy: + description: + - The security policy associated with this backend service. + returned: success + type: str + sessionAffinity: + description: + - Type of session affinity to use. The default is NONE. Session affinity is + not applicable if the protocol is UDP. + returned: success + type: str + timeoutSec: + description: + - How many seconds to wait for the backend before considering it a failed request. + Default is 30 seconds. Valid range is [1, 86400]. + returned: success + type: int + logConfig: + description: + - This field denotes the logging options for the load balancer traffic served + by this backend service. + - If logging is enabled, logs will be exported to Stackdriver. + returned: success + type: complex + contains: + enable: + description: + - Whether to enable logging for the load balancer traffic served by this + backend service. + returned: success + type: bool + sampleRate: + description: + - This field can only be specified if logging is enabled for this backend + service. The value of the field must be in [0, 1]. This configures the + sampling rate of requests to the load balancer where 1.0 means all logged + requests are reported and 0.0 means no logged requests are reported. + - The default value is 1.0. 
################################################################################
# Imports
################################################################################
try:
    # Shared helpers from the google.cloud collection; always importable when
    # Ansible executes the module. Guarded so documentation/lint tooling that
    # lacks the collection on sys.path can still import this file.
    from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
except ImportError:
    pass
import json

################################################################################
# Main
################################################################################


def main():
    """Entry point: list BackendService resources and exit with them under 'resources'."""
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))

    # Default to the compute scope when the caller did not supply any scopes.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
    module.exit_json(**return_value)


def collection(module):
    """Return the list URL for backendServices in the configured project."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/backendServices".format(**module.params)


def fetch_list(module, link, query):
    """GET the collection (paginated under 'items'), filtered by the query string."""
    auth = GcpSession(module, 'compute')
    return auth.list(link, return_if_object, array_name='items', params={'filter': query})


def query_options(filters):
    """Combine the user-supplied filter expressions into one API filter string.

    A single filter is passed through untouched; multiple filters are each
    parenthesized and space-joined (an implicit AND in the GCP filter syntax).
    Returns '' when no filters were supplied.
    """
    if not filters:
        return ''

    if len(filters) == 1:
        return filters[0]

    queries = []
    for f in filters:
        # For multiple queries, all queries should have () so operator
        # precedence in the combined expression is unambiguous.
        # NOTE(review): a filter is left unwrapped if it starts with '(' OR
        # ends with ')'; a mixed expression like "a or (b)" therefore escapes
        # wrapping. Preserved as-is from the generated code — confirm upstream
        # before tightening to startswith/endswith both.
        if f[0] != '(' and f[-1] != ')':
            # The original "''.join(f)" was a no-op on a string; use f directly.
            queries.append("(%s)" % f)
        else:
            queries.append(f)

    return ' '.join(queries)


def return_if_object(module, response):
    """Decode a JSON API response; None for 404/204; fail the module on API errors."""
    # If not found, return nothing.
    if response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    # Surface any error payload the API returned alongside a 2xx status.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


if __name__ == "__main__":
    main()
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_disk +description: +- Persistent disks are durable storage devices that function similarly to the physical + disks in a desktop or a server. Compute Engine manages the hardware behind these + devices to ensure data redundancy and optimize performance for you. Persistent disks + are available as either standard hard disk drives (HDD) or solid-state drives (SSD). +- Persistent disks are located independently from your virtual machine instances, + so you can detach or move persistent disks to keep your data even after you delete + your instances. Persistent disk performance scales automatically with size, so you + can resize your existing persistent disks or add more persistent disks to an instance + to meet your performance and storage space requirements. +- Add a persistent disk to your instance when you need reliable and affordable storage + with consistent performance characteristics. +short_description: Creates a GCP Disk +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. 
+ required: false + type: str + labels: + description: + - Labels to apply to this disk. A list of key->value pairs. + required: false + type: dict + licenses: + description: + - Any applicable publicly visible licenses. + elements: str + required: false + type: list + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + size_gb: + description: + - Size of the persistent disk, specified in GB. You can specify this field when + creating a persistent disk using the sourceImage or sourceSnapshot parameter, + or specify it alone to create an empty persistent disk. + - If you specify this field along with sourceImage or sourceSnapshot, the value + of sizeGb must not be less than the size of the sourceImage or the size of the + snapshot. + required: false + type: int + physical_block_size_bytes: + description: + - Physical block size of the persistent disk, in bytes. If not present in a request, + a default value is used. Currently supported sizes are 4096 and 16384, other + sizes may be added in the future. + - If an unsupported value is requested, the error message will list the supported + values for the caller's project. + required: false + type: int + type: + description: + - URL of the disk type resource describing which disk type to use to create the + disk. Provide this when creating the disk. + required: false + type: str + source_image: + description: + - The source image used to create this disk. If the source image is deleted, this + field will not be set. 
+ - 'To create a disk with one of the public operating system images, specify the + image by its family name. For example, specify family/debian-9 to use the latest + Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, + use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD + To create a disk with a private image that you created, specify the image name + in the following format: global/images/my-private-image You can also specify + a private image by its image family, which returns the latest version of the + image in that family. Replace the image name with family/family-name: global/images/family/my-private-family + .' + required: false + type: str + provisioned_iops: + description: + - Indicates how many IOPS must be provisioned for the disk. + required: false + type: int + zone: + description: + - A reference to the zone where the disk resides. + required: true + type: str + source_image_encryption_key: + description: + - The customer-supplied encryption key of the source image. Required if the source + image is protected by a customer-supplied encryption key. + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + required: false + type: str + kms_key_name: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + required: false + type: str + kms_key_service_account: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. + required: false + type: str + disk_encryption_key: + description: + - Encrypts the disk using a customer-supplied encryption key. + - After you encrypt a disk with a customer-supplied key, you must provide the + same key if you use the disk later (e.g. 
to create a disk snapshot or an image, + or to attach the disk to a virtual machine). + - Customer-supplied encryption keys do not protect access to metadata of the disk. + - If you do not provide an encryption key when creating the disk, then the disk + will be encrypted using an automatically generated key and you do not need to + provide a key to use the disk later. + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + required: false + type: str + kms_key_name: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + - Your project's Compute Engine System service account (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) + must have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + required: false + type: str + kms_key_service_account: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. + required: false + type: str + source_snapshot: + description: + - The source snapshot used to create this disk. You can provide this as a partial + or full URL to the resource. + - 'This field represents a link to a Snapshot resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_snapshot task and then set this source_snapshot field to "{{ + name-of-resource }}"' + required: false + type: dict + source_snapshot_encryption_key: + description: + - The customer-supplied encryption key of the source snapshot. Required if the + source snapshot is protected by a customer-supplied encryption key. 
+ required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + required: false + type: str + kms_key_name: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + required: false + type: str + kms_key_service_account: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/disks)' +- 'Adding a persistent disk: U(https://cloud.google.com/compute/docs/disks/add-persistent-disk)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. 
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a disk + google.cloud.gcp_compute_disk: + name: test_object + size_gb: 50 + disk_encryption_key: + raw_key: SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0= + zone: us-central1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +lastAttachTimestamp: + description: + - Last attach timestamp in RFC3339 text format. + returned: success + type: str +lastDetachTimestamp: + description: + - Last detach timestamp in RFC3339 text format. + returned: success + type: str +labels: + description: + - Labels to apply to this disk. A list of key->value pairs. + returned: success + type: dict +licenses: + description: + - Any applicable publicly visible licenses. + returned: success + type: list +name: + description: + - Name of the resource. 
Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +sizeGb: + description: + - Size of the persistent disk, specified in GB. You can specify this field when + creating a persistent disk using the sourceImage or sourceSnapshot parameter, + or specify it alone to create an empty persistent disk. + - If you specify this field along with sourceImage or sourceSnapshot, the value + of sizeGb must not be less than the size of the sourceImage or the size of the + snapshot. + returned: success + type: int +users: + description: + - 'Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance + .' + returned: success + type: list +physicalBlockSizeBytes: + description: + - Physical block size of the persistent disk, in bytes. If not present in a request, + a default value is used. Currently supported sizes are 4096 and 16384, other sizes + may be added in the future. + - If an unsupported value is requested, the error message will list the supported + values for the caller's project. + returned: success + type: int +type: + description: + - URL of the disk type resource describing which disk type to use to create the + disk. Provide this when creating the disk. + returned: success + type: str +sourceImage: + description: + - The source image used to create this disk. If the source image is deleted, this + field will not be set. + - 'To create a disk with one of the public operating system images, specify the + image by its family name. 
For example, specify family/debian-9 to use the latest + Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, + use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD + To create a disk with a private image that you created, specify the image name + in the following format: global/images/my-private-image You can also specify a + private image by its image family, which returns the latest version of the image + in that family. Replace the image name with family/family-name: global/images/family/my-private-family + .' + returned: success + type: str +provisionedIops: + description: + - Indicates how many IOPS must be provisioned for the disk. + returned: success + type: int +zone: + description: + - A reference to the zone where the disk resides. + returned: success + type: str +sourceImageEncryptionKey: + description: + - The customer-supplied encryption key of the source image. Required if the source + image is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str +sourceImageId: + description: + - The ID value of the image used to create this disk. 
This value identifies the + exact image that was used to create this persistent disk. For example, if you + created the persistent disk from an image that was later deleted and recreated + under the same name, the source image ID would identify the exact version of the + image that was used. + returned: success + type: str +diskEncryptionKey: + description: + - Encrypts the disk using a customer-supplied encryption key. + - After you encrypt a disk with a customer-supplied key, you must provide the same + key if you use the disk later (e.g. to create a disk snapshot or an image, or + to attach the disk to a virtual machine). + - Customer-supplied encryption keys do not protect access to metadata of the disk. + - If you do not provide an encryption key when creating the disk, then the disk + will be encrypted using an automatically generated key and you do not need to + provide a key to use the disk later. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + - Your project's Compute Engine System service account (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) + must have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str +sourceSnapshot: + description: + - The source snapshot used to create this disk. 
You can provide this as a partial + or full URL to the resource. + returned: success + type: dict +sourceSnapshotEncryptionKey: + description: + - The customer-supplied encryption key of the source snapshot. Required if the source + snapshot is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str +sourceSnapshotId: + description: + - The unique ID of the snapshot used to create this disk. This value identifies + the exact snapshot that was used to create this persistent disk. For example, + if you created the persistent disk from a snapshot that was later deleted and + recreated under the same name, the source snapshot ID would identify the exact + version of the snapshot that was used. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + labels=dict(type='dict'), + licenses=dict(type='list', elements='str'), + name=dict(required=True, type='str'), + size_gb=dict(type='int'), + physical_block_size_bytes=dict(type='int'), + type=dict(type='str'), + source_image=dict(type='str'), + provisioned_iops=dict(type='int'), + zone=dict(required=True, type='str'), + source_image_encryption_key=dict( + type='dict', no_log=True, options=dict(raw_key=dict(type='str'), kms_key_name=dict(type='str'), kms_key_service_account=dict(type='str')) + ), + disk_encryption_key=dict( + type='dict', no_log=True, options=dict(raw_key=dict(type='str'), kms_key_name=dict(type='str'), kms_key_service_account=dict(type='str')) + ), + source_snapshot=dict(type='dict', no_log=True), + source_snapshot_encryption_key=dict( + type='dict', no_log=True, options=dict(raw_key=dict(type='str'), kms_key_name=dict(type='str'), kms_key_service_account=dict(type='str')) + ), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#disk' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state 
== 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) + return fetch_resource(module, self_link(module), kind) + + +def update_fields(module, request, response): + if response.get('labels') != request.get('labels'): + label_fingerprint_update(module, request, response) + if response.get('sizeGb') != request.get('sizeGb'): + size_gb_update(module, request, response) + + +def label_fingerprint_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/disks/{name}/setLabels"]).format(**module.params), + {u'labelFingerprint': response.get('labelFingerprint'), u'labels': module.params.get('labels')}, + ) + + +def size_gb_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/disks/{name}/resize"]).format(**module.params), + {u'sizeGb': module.params.get('size_gb')}, + ) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#disk', + u'sourceImageEncryptionKey': 
DiskSourceimageencryptionkey(module.params.get('source_image_encryption_key', {}), module).to_request(), + u'diskEncryptionKey': DiskDiskencryptionkey(module.params.get('disk_encryption_key', {}), module).to_request(), + u'sourceSnapshot': replace_resource_dict(module.params.get(u'source_snapshot', {}), 'selfLink'), + u'sourceSnapshotEncryptionKey': DiskSourcesnapshotencryptionkey(module.params.get('source_snapshot_encryption_key', {}), module).to_request(), + u'description': module.params.get('description'), + u'labels': module.params.get('labels'), + u'licenses': module.params.get('licenses'), + u'name': module.params.get('name'), + u'sizeGb': module.params.get('size_gb'), + u'physicalBlockSizeBytes': module.params.get('physical_block_size_bytes'), + u'type': disk_type_selflink(module.params.get('type'), module.params), + u'sourceImage': module.params.get('source_image'), + u'provisionedIops': module.params.get('provisioned_iops'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'labelFingerprint': response.get(u'labelFingerprint'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'id': response.get(u'id'), + u'lastAttachTimestamp': response.get(u'lastAttachTimestamp'), + u'lastDetachTimestamp': response.get(u'lastDetachTimestamp'), + u'labels': response.get(u'labels'), + u'licenses': response.get(u'licenses'), + u'name': module.params.get('name'), + u'sizeGb': response.get(u'sizeGb'), + u'users': response.get(u'users'), + u'physicalBlockSizeBytes': response.get(u'physicalBlockSizeBytes'), + u'type': response.get(u'type'), + u'sourceImage': module.params.get('source_image'), + u'provisionedIops': response.get(u'provisionedIops'), + } + + +def disk_type_selflink(name, params): + if name is None: + return + url = r"https://compute.googleapis.com/compute/v1/projects/.*/zones/.*/diskTypes/.*" + if not re.match(url, name): + name = 
"https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/diskTypes/%s".format(**params) % name + return name + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#disk') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class DiskSourceimageencryptionkey(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get('raw_key'), + u'kmsKeyName': self.request.get('kms_key_name'), + u'kmsKeyServiceAccount': self.request.get('kms_key_service_account'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get(u'rawKey'), + u'kmsKeyName': self.request.get(u'kmsKeyName'), + u'kmsKeyServiceAccount': self.request.get(u'kmsKeyServiceAccount'), + } + ) + + +class 
DiskDiskencryptionkey(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get('raw_key'), + u'kmsKeyName': self.request.get('kms_key_name'), + u'kmsKeyServiceAccount': self.request.get('kms_key_service_account'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get(u'rawKey'), + u'kmsKeyName': self.request.get(u'kmsKeyName'), + u'kmsKeyServiceAccount': self.request.get(u'kmsKeyServiceAccount'), + } + ) + + +class DiskSourcesnapshotencryptionkey(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get('raw_key'), + u'kmsKeyName': self.request.get('kms_key_name'), + u'kmsKeyServiceAccount': self.request.get('kms_key_service_account'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get(u'rawKey'), + u'kmsKeyName': self.request.get(u'kmsKeyName'), + u'kmsKeyServiceAccount': self.request.get(u'kmsKeyServiceAccount'), + } + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_disk_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_disk_info.py new file mode 100644 index 000000000..1abc5c4ca --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_disk_info.py @@ -0,0 +1,429 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# 
---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_disk_info +description: +- Gather info for GCP Disk +short_description: Gather info for GCP Disk +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + zone: + description: + - A reference to the zone where the disk resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a disk + gcp_compute_disk_info: + zone: us-central1-a + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. 
+ returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + lastAttachTimestamp: + description: + - Last attach timestamp in RFC3339 text format. + returned: success + type: str + lastDetachTimestamp: + description: + - Last detach timestamp in RFC3339 text format. + returned: success + type: str + labels: + description: + - Labels to apply to this disk. A list of key->value pairs. + returned: success + type: dict + licenses: + description: + - Any applicable publicly visible licenses. + returned: success + type: list + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + sizeGb: + description: + - Size of the persistent disk, specified in GB. You can specify this field when + creating a persistent disk using the sourceImage or sourceSnapshot parameter, + or specify it alone to create an empty persistent disk. + - If you specify this field along with sourceImage or sourceSnapshot, the value + of sizeGb must not be less than the size of the sourceImage or the size of + the snapshot. + returned: success + type: int + users: + description: + - 'Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance + .' + returned: success + type: list + physicalBlockSizeBytes: + description: + - Physical block size of the persistent disk, in bytes. If not present in a + request, a default value is used. Currently supported sizes are 4096 and 16384, + other sizes may be added in the future. 
+ - If an unsupported value is requested, the error message will list the supported + values for the caller's project. + returned: success + type: int + type: + description: + - URL of the disk type resource describing which disk type to use to create + the disk. Provide this when creating the disk. + returned: success + type: str + sourceImage: + description: + - The source image used to create this disk. If the source image is deleted, + this field will not be set. + - 'To create a disk with one of the public operating system images, specify + the image by its family name. For example, specify family/debian-9 to use + the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 + Alternatively, use a specific version of a public operating system image: + projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a + disk with a private image that you created, specify the image name in the + following format: global/images/my-private-image You can also specify a private + image by its image family, which returns the latest version of the image in + that family. Replace the image name with family/family-name: global/images/family/my-private-family + .' + returned: success + type: str + provisionedIops: + description: + - Indicates how many IOPS must be provisioned for the disk. + returned: success + type: int + zone: + description: + - A reference to the zone where the disk resides. + returned: success + type: str + sourceImageEncryptionKey: + description: + - The customer-supplied encryption key of the source image. Required if the + source image is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. 
+ returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS + key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str + sourceImageId: + description: + - The ID value of the image used to create this disk. This value identifies + the exact image that was used to create this persistent disk. For example, + if you created the persistent disk from an image that was later deleted and + recreated under the same name, the source image ID would identify the exact + version of the image that was used. + returned: success + type: str + diskEncryptionKey: + description: + - Encrypts the disk using a customer-supplied encryption key. + - After you encrypt a disk with a customer-supplied key, you must provide the + same key if you use the disk later (e.g. to create a disk snapshot or an image, + or to attach the disk to a virtual machine). + - Customer-supplied encryption keys do not protect access to metadata of the + disk. + - If you do not provide an encryption key when creating the disk, then the disk + will be encrypted using an automatically generated key and you do not need + to provide a key to use the disk later. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. 
+ returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + - Your project's Compute Engine System service account (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) + must have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS + key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str + sourceSnapshot: + description: + - The source snapshot used to create this disk. You can provide this as a partial + or full URL to the resource. + returned: success + type: dict + sourceSnapshotEncryptionKey: + description: + - The customer-supplied encryption key of the source snapshot. Required if the + source snapshot is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS + key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str + sourceSnapshotId: + description: + - The unique ID of the snapshot used to create this disk. This value identifies + the exact snapshot that was used to create this persistent disk. 
For example, + if you created the persistent disk from a snapshot that was later deleted + and recreated under the same name, the source snapshot ID would identify the + exact version of the snapshot that was used. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_external_vpn_gateway.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_external_vpn_gateway.py new file mode 100644 index 000000000..48471504e --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_external_vpn_gateway.py @@ -0,0 +1,426 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_external_vpn_gateway +description: +- Represents a VPN gateway managed outside of GCP. +short_description: Creates a GCP ExternalVpnGateway +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + redundancy_type: + description: + - Indicates the redundancy type of this external VPN gateway . + - 'Some valid choices include: "FOUR_IPS_REDUNDANCY", "SINGLE_IP_INTERNALLY_REDUNDANT", + "TWO_IPS_REDUNDANCY"' + required: false + type: str + interfaces: + description: + - A list of interfaces on this external VPN gateway. 
+ elements: dict + required: false + type: list + suboptions: + id: + description: + - The numberic ID for this interface. Allowed values are based on the redundancy + type of this external VPN gateway * `0 - SINGLE_IP_INTERNALLY_REDUNDANT` + * `0, 1 - TWO_IPS_REDUNDANCY` * `0, 1, 2, 3 - FOUR_IPS_REDUNDANCY` . + required: false + type: int + ip_address: + description: + - IP address of the interface in the external VPN gateway. + - Only IPv4 is supported. This IP address can be either from your on-premise + gateway or another Cloud provider's VPN gateway, it cannot be an IP address + from Google Compute Engine. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/beta/externalVpnGateways)' +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. 
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a external vpn gateway + google.cloud.gcp_compute_external_vpn_gateway: + name: test_object + redundancy_type: SINGLE_IP_INTERNALLY_REDUNDANT + descrpition: An externalyl managed VPN gateway + interfaces: + - id: 0 + ip_address: 8.8.8.8 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +description: + description: + - An optional description of this resource. + returned: success + type: str +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +redundancyType: + description: + - Indicates the redundancy type of this external VPN gateway . + returned: success + type: str +interfaces: + description: + - A list of interfaces on this external VPN gateway. + returned: success + type: complex + contains: + id: + description: + - The numberic ID for this interface. 
Allowed values are based on the redundancy + type of this external VPN gateway * `0 - SINGLE_IP_INTERNALLY_REDUNDANT` * + `0, 1 - TWO_IPS_REDUNDANCY` * `0, 1, 2, 3 - FOUR_IPS_REDUNDANCY` . + returned: success + type: int + ipAddress: + description: + - IP address of the interface in the external VPN gateway. + - Only IPv4 is supported. This IP address can be either from your on-premise + gateway or another Cloud provider's VPN gateway, it cannot be an IP address + from Google Compute Engine. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + redundancy_type=dict(type='str'), + interfaces=dict(type='list', elements='dict', options=dict(id=dict(type='int'), ip_address=dict(type='str'))), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#externalVpnGateway' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + 
fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), kind) + create(module, collection(module), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#externalVpnGateway', + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'redundancyType': module.params.get('redundancy_type'), + u'interfaces': ExternalVpnGatewayInterfacesArray(module.params.get('interfaces', []), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/externalVpnGateways/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/externalVpnGateways".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'description': response.get(u'description'), + u'name': response.get(u'name'), + u'redundancyType': response.get(u'redundancyType'), + u'interfaces': ExternalVpnGatewayInterfacesArray(response.get(u'interfaces', []), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#externalVpnGateway') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, 
['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class ExternalVpnGatewayInterfacesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'id': item.get('id'), u'ipAddress': item.get('ip_address')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'id': item.get(u'id'), u'ipAddress': item.get(u'ipAddress')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_external_vpn_gateway_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_external_vpn_gateway_info.py new file mode 100644 index 000000000..cb4772660 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_external_vpn_gateway_info.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is 
automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_external_vpn_gateway_info +description: +- Gather info for GCP ExternalVpnGateway +short_description: Gather info for GCP ExternalVpnGateway +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an external vpn gateway + gcp_compute_external_vpn_gateway_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + description: + description: + - An optional description of this resource. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + redundancyType: + description: + - Indicates the redundancy type of this external VPN gateway . + returned: success + type: str + interfaces: + description: + - A list of interfaces on this external VPN gateway. + returned: success + type: complex + contains: + id: + description: + - The numberic ID for this interface. Allowed values are based on the redundancy + type of this external VPN gateway * `0 - SINGLE_IP_INTERNALLY_REDUNDANT` + * `0, 1 - TWO_IPS_REDUNDANCY` * `0, 1, 2, 3 - FOUR_IPS_REDUNDANCY` . + returned: success + type: int + ipAddress: + description: + - IP address of the interface in the external VPN gateway. + - Only IPv4 is supported. This IP address can be either from your on-premise + gateway or another Cloud provider's VPN gateway, it cannot be an IP address + from Google Compute Engine. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/externalVpnGateways".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_firewall.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_firewall.py new file mode 100644 index 000000000..08f044492 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_firewall.py @@ -0,0 +1,830 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_firewall +description: +- Each network has its own firewall controlling access to and from the instances. +- All traffic to instances, even from other instances, is blocked by the firewall + unless firewall rules are created to allow it. +- The default network has automatically created firewall rules that are shown in default + firewall rules. No manually created network has automatically created firewall rules + except for a default "allow" rule for outgoing traffic and a default "deny" for + incoming traffic. For all networks except the default network, you must create any + firewall rules you need. +short_description: Creates a GCP Firewall +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + allowed: + description: + - The list of ALLOW rules specified by this firewall. Each rule specifies a protocol + and port-range tuple that describes a permitted connection. + elements: dict + required: false + type: list + suboptions: + ip_protocol: + description: + - The IP protocol to which this rule applies. The protocol type is required + when creating a firewall rule. 
This value can either be one of the following + well known protocol strings (tcp, udp, icmp, esp, ah, sctp, ipip, all), + or the IP protocol number. + required: true + type: str + ports: + description: + - An optional list of ports to which this rule applies. This field is only + applicable for UDP or TCP protocol. Each entry must be either an integer + or a range. If not specified, this rule applies to connections through any + port. + - 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].' + elements: str + required: false + type: list + denied: + description: + - The list of DENY rules specified by this firewall. Each rule specifies a protocol + and port-range tuple that describes a denied connection. + elements: dict + required: false + type: list + suboptions: + ip_protocol: + description: + - The IP protocol to which this rule applies. The protocol type is required + when creating a firewall rule. This value can either be one of the following + well known protocol strings (tcp, udp, icmp, esp, ah, sctp, ipip, all), + or the IP protocol number. + required: true + type: str + ports: + description: + - An optional list of ports to which this rule applies. This field is only + applicable for UDP or TCP protocol. Each entry must be either an integer + or a range. If not specified, this rule applies to connections through any + port. + - 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].' + elements: str + required: false + type: list + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + destination_ranges: + description: + - If destination ranges are specified, the firewall will apply only to traffic + that has destination IP address in these ranges. These ranges must be expressed + in CIDR format. Only IPv4 is supported. 
+ elements: str + required: false + type: list + direction: + description: + - 'Direction of traffic to which this firewall applies; default is INGRESS. Note: + For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS + traffic, it is NOT supported to specify sourceRanges OR sourceTags.' + - 'Some valid choices include: "INGRESS", "EGRESS"' + required: false + type: str + disabled: + description: + - Denotes whether the firewall rule is disabled, i.e not applied to the network + it is associated with. When set to true, the firewall rule is not enforced and + the network behaves as if it did not exist. If this is unspecified, the firewall + rule will be enabled. + required: false + type: bool + log_config: + description: + - This field denotes the logging options for a particular firewall rule. + - If logging is enabled, logs will be exported to Cloud Logging. + required: false + type: dict + suboptions: + enable: + description: + - This field denotes whether to enable logging for a particular firewall rule. + If logging is enabled, logs will be exported to Stackdriver. + required: false + type: bool + metadata: + description: + - This field denotes whether to include or exclude metadata for firewall logs. + - 'Some valid choices include: "EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA"' + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + network: + description: + - 'URL of the network resource for this firewall rule. 
If not specified when creating + a firewall rule, the default network is used: global/networks/default If you + choose to specify this property, you can specify the network as a full or partial + URL. For example, the following are all valid URLs: U(https://www.googleapis.com/compute/v1/projects/myproject/global/) + networks/my-network projects/myproject/global/networks/my-network global/networks/default + .' + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: false + default: + selfLink: global/networks/default + type: dict + priority: + description: + - Priority for this rule. This is an integer between 0 and 65535, both inclusive. + When not specified, the value assumed is 1000. Relative priorities determine + precedence of conflicting rules. Lower value of priority implies higher precedence + (eg, a rule with priority 0 has higher precedence than a rule with priority + 1). DENY rules take precedence over ALLOW rules having equal priority. + required: false + default: '1000' + type: int + source_ranges: + description: + - If source ranges are specified, the firewall will apply only to traffic that + has source IP address in these ranges. These ranges must be expressed in CIDR + format. One or both of sourceRanges and sourceTags may be set. If both properties + are set, the firewall will apply to traffic that has source IP address within + sourceRanges OR the source IP that belongs to a tag listed in the sourceTags + property. The connection does not need to match both properties for the firewall + to apply. Only IPv4 is supported. 
+ elements: str + required: false + type: list + source_service_accounts: + description: + - If source service accounts are specified, the firewall will apply only to traffic + originating from an instance with a service account in this list. Source service + accounts cannot be used to control traffic to an instance's external IP address + because service accounts are associated with an instance, not an IP address. + sourceRanges can be set at the same time as sourceServiceAccounts. If both are + set, the firewall will apply to traffic that has source IP address within sourceRanges + OR the source IP belongs to an instance with service account listed in sourceServiceAccount. + The connection does not need to match both properties for the firewall to apply. + sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags. + elements: str + required: false + type: list + source_tags: + description: + - If source tags are specified, the firewall will apply only to traffic with source + IP that belongs to a tag listed in source tags. Source tags cannot be used to + control traffic to an instance's external IP address. Because tags are associated + with an instance, not an IP address. One or both of sourceRanges and sourceTags + may be set. If both properties are set, the firewall will apply to traffic that + has source IP address within sourceRanges OR the source IP that belongs to a + tag listed in the sourceTags property. The connection does not need to match + both properties for the firewall to apply. + elements: str + required: false + type: list + target_service_accounts: + description: + - A list of service accounts indicating sets of instances located in the network + that may make network connections as specified in allowed[]. + - targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. 
+ If neither targetServiceAccounts nor targetTags are specified, the firewall + rule applies to all instances on the specified network. + elements: str + required: false + type: list + target_tags: + description: + - A list of instance tags indicating sets of instances located in the network + that may make network connections as specified in allowed[]. + - If no targetTags are specified, the firewall rule applies to all instances on + the specified network. + elements: str + required: false + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/firewalls)' +- 'Official Documentation: U(https://cloud.google.com/vpc/docs/firewalls)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. 
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a firewall + google.cloud.gcp_compute_firewall: + name: test_object + allowed: + - ip_protocol: tcp + ports: + - '22' + target_tags: + - test-ssh-server + - staging-ssh-server + source_tags: + - test-ssh-clients + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +allowed: + description: + - The list of ALLOW rules specified by this firewall. Each rule specifies a protocol + and port-range tuple that describes a permitted connection. + returned: success + type: complex + contains: + ip_protocol: + description: + - The IP protocol to which this rule applies. The protocol type is required + when creating a firewall rule. This value can either be one of the following + well known protocol strings (tcp, udp, icmp, esp, ah, sctp, ipip, all), or + the IP protocol number. + returned: success + type: str + ports: + description: + - An optional list of ports to which this rule applies. This field is only applicable + for UDP or TCP protocol. Each entry must be either an integer or a range. + If not specified, this rule applies to connections through any port. + - 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].' + returned: success + type: list +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +denied: + description: + - The list of DENY rules specified by this firewall. 
Each rule specifies a protocol + and port-range tuple that describes a denied connection. + returned: success + type: complex + contains: + ip_protocol: + description: + - The IP protocol to which this rule applies. The protocol type is required + when creating a firewall rule. This value can either be one of the following + well known protocol strings (tcp, udp, icmp, esp, ah, sctp, ipip, all), or + the IP protocol number. + returned: success + type: str + ports: + description: + - An optional list of ports to which this rule applies. This field is only applicable + for UDP or TCP protocol. Each entry must be either an integer or a range. + If not specified, this rule applies to connections through any port. + - 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].' + returned: success + type: list +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +destinationRanges: + description: + - If destination ranges are specified, the firewall will apply only to traffic that + has destination IP address in these ranges. These ranges must be expressed in + CIDR format. Only IPv4 is supported. + returned: success + type: list +direction: + description: + - 'Direction of traffic to which this firewall applies; default is INGRESS. Note: + For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS + traffic, it is NOT supported to specify sourceRanges OR sourceTags.' + returned: success + type: str +disabled: + description: + - Denotes whether the firewall rule is disabled, i.e not applied to the network + it is associated with. When set to true, the firewall rule is not enforced and + the network behaves as if it did not exist. If this is unspecified, the firewall + rule will be enabled. + returned: success + type: bool +logConfig: + description: + - This field denotes the logging options for a particular firewall rule. 
+ - If logging is enabled, logs will be exported to Cloud Logging. + returned: success + type: complex + contains: + enable: + description: + - This field denotes whether to enable logging for a particular firewall rule. + If logging is enabled, logs will be exported to Stackdriver. + returned: success + type: bool + metadata: + description: + - This field denotes whether to include or exclude metadata for firewall logs. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +network: + description: + - 'URL of the network resource for this firewall rule. If not specified when creating + a firewall rule, the default network is used: global/networks/default If you choose + to specify this property, you can specify the network as a full or partial URL. + For example, the following are all valid URLs: U(https://www.googleapis.com/compute/v1/projects/myproject/global/) + networks/my-network projects/myproject/global/networks/my-network global/networks/default + .' + returned: success + type: dict +priority: + description: + - Priority for this rule. This is an integer between 0 and 65535, both inclusive. + When not specified, the value assumed is 1000. Relative priorities determine precedence + of conflicting rules. Lower value of priority implies higher precedence (eg, a + rule with priority 0 has higher precedence than a rule with priority 1). 
DENY + rules take precedence over ALLOW rules having equal priority. + returned: success + type: int +sourceRanges: + description: + - If source ranges are specified, the firewall will apply only to traffic that has + source IP address in these ranges. These ranges must be expressed in CIDR format. + One or both of sourceRanges and sourceTags may be set. If both properties are + set, the firewall will apply to traffic that has source IP address within sourceRanges + OR the source IP that belongs to a tag listed in the sourceTags property. The + connection does not need to match both properties for the firewall to apply. Only + IPv4 is supported. + returned: success + type: list +sourceServiceAccounts: + description: + - If source service accounts are specified, the firewall will apply only to traffic + originating from an instance with a service account in this list. Source service + accounts cannot be used to control traffic to an instance's external IP address + because service accounts are associated with an instance, not an IP address. sourceRanges + can be set at the same time as sourceServiceAccounts. If both are set, the firewall + will apply to traffic that has source IP address within sourceRanges OR the source + IP belongs to an instance with service account listed in sourceServiceAccount. + The connection does not need to match both properties for the firewall to apply. + sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags. + returned: success + type: list +sourceTags: + description: + - If source tags are specified, the firewall will apply only to traffic with source + IP that belongs to a tag listed in source tags. Source tags cannot be used to + control traffic to an instance's external IP address. Because tags are associated + with an instance, not an IP address. One or both of sourceRanges and sourceTags + may be set. 
If both properties are set, the firewall will apply to traffic that + has source IP address within sourceRanges OR the source IP that belongs to a tag + listed in the sourceTags property. The connection does not need to match both + properties for the firewall to apply. + returned: success + type: list +targetServiceAccounts: + description: + - A list of service accounts indicating sets of instances located in the network + that may make network connections as specified in allowed[]. + - targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. + If neither targetServiceAccounts nor targetTags are specified, the firewall rule + applies to all instances on the specified network. + returned: success + type: list +targetTags: + description: + - A list of instance tags indicating sets of instances located in the network that + may make network connections as specified in allowed[]. + - If no targetTags are specified, the firewall rule applies to all instances on + the specified network. 
+ returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + allowed=dict(type='list', elements='dict', options=dict(ip_protocol=dict(required=True, type='str'), ports=dict(type='list', elements='str'))), + denied=dict(type='list', elements='dict', options=dict(ip_protocol=dict(required=True, type='str'), ports=dict(type='list', elements='str'))), + description=dict(type='str'), + destination_ranges=dict(type='list', elements='str'), + direction=dict(type='str'), + disabled=dict(type='bool'), + log_config=dict(type='dict', options=dict(enable=dict(type='bool'), metadata=dict(type='str'))), + name=dict(required=True, type='str'), + network=dict(default=dict(selfLink='global/networks/default'), type='dict'), + priority=dict(default=1000, type='int'), + source_ranges=dict(type='list', elements='str'), + source_service_accounts=dict(type='list', elements='str'), + source_tags=dict(type='list', elements='str'), + target_service_accounts=dict(type='list', elements='str'), + target_tags=dict(type='list', elements='str'), + ), + mutually_exclusive=[ + ['destination_ranges', 'source_ranges', 'source_tags'], + ['destination_ranges', 'source_ranges'], + ['source_service_accounts', 'source_tags', 'target_tags'], + ['destination_ranges', 'source_service_accounts', 
'source_tags', 'target_service_accounts'], + ['source_tags', 'target_service_accounts', 'target_tags'], + ['source_service_accounts', 'target_service_accounts', 'target_tags'], + ], + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#firewall' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.patch(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#firewall', + u'allowed': FirewallAllowedArray(module.params.get('allowed', []), module).to_request(), + u'denied': FirewallDeniedArray(module.params.get('denied', []), module).to_request(), + u'description': module.params.get('description'), + u'destinationRanges': module.params.get('destination_ranges'), + u'direction': module.params.get('direction'), + u'disabled': module.params.get('disabled'), + u'logConfig': FirewallLogconfig(module.params.get('log_config', {}), module).to_request(), + u'name': module.params.get('name'), + u'network': 
replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + u'priority': module.params.get('priority'), + u'sourceRanges': module.params.get('source_ranges'), + u'sourceServiceAccounts': module.params.get('source_service_accounts'), + u'sourceTags': module.params.get('source_tags'), + u'targetServiceAccounts': module.params.get('target_service_accounts'), + u'targetTags': module.params.get('target_tags'), + } + request = encode_request(request, module) + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/firewalls/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/firewalls".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'allowed': FirewallAllowedArray(response.get(u'allowed', []), module).from_response(), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'denied': FirewallDeniedArray(response.get(u'denied', []), module).from_response(), + u'description': response.get(u'description'), + u'destinationRanges': response.get(u'destinationRanges'), + u'direction': response.get(u'direction'), + u'disabled': response.get(u'disabled'), + u'logConfig': FirewallLogconfig(response.get(u'logConfig', {}), module).from_response(), + u'id': response.get(u'id'), + u'name': module.params.get('name'), + u'network': response.get(u'network'), + u'priority': response.get(u'priority'), + u'sourceRanges': response.get(u'sourceRanges'), + u'sourceServiceAccounts': response.get(u'sourceServiceAccounts'), + u'sourceTags': response.get(u'sourceTags'), + u'targetServiceAccounts': response.get(u'targetServiceAccounts'), + u'targetTags': response.get(u'targetTags'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, 
['targetLink']), 'compute#firewall') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +def encode_request(request, module): + if 'network' in request and request['network'] is not None: + if not re.match(r'https://www.googleapis.com/compute/v1/projects/.*', request['network']): + request['network'] = 'https://www.googleapis.com/compute/v1/projects/{project}/{network}'.format( + project=module.params['project'], network=request['network'] + ) + + return request + + +class FirewallAllowedArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'IPProtocol': item.get('ip_protocol'), u'ports': item.get('ports')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'IPProtocol': item.get(u'IPProtocol'), u'ports': item.get(u'ports')}) + + +class FirewallDeniedArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def 
from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'IPProtocol': item.get('ip_protocol'), u'ports': item.get('ports')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'IPProtocol': item.get(u'IPProtocol'), u'ports': item.get(u'ports')}) + + +class FirewallLogconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'enable': self.request.get('enable'), u'metadata': self.request.get('metadata')}) + + def from_response(self): + return remove_nones_from_dict({u'enable': self.request.get(u'enable'), u'metadata': self.request.get(u'metadata')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_firewall_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_firewall_info.py new file mode 100644 index 000000000..6b90c57ea --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_firewall_info.py @@ -0,0 +1,376 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_firewall_info +description: +- Gather info for GCP Firewall +short_description: Gather info for GCP Firewall +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a firewall + gcp_compute_firewall_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + allowed: + description: + - The list of ALLOW rules specified by this firewall. Each rule specifies a + protocol and port-range tuple that describes a permitted connection. + returned: success + type: complex + contains: + ip_protocol: + description: + - The IP protocol to which this rule applies. The protocol type is required + when creating a firewall rule. This value can either be one of the following + well known protocol strings (tcp, udp, icmp, esp, ah, sctp, ipip, all), + or the IP protocol number. + returned: success + type: str + ports: + description: + - An optional list of ports to which this rule applies. 
This field is only + applicable for UDP or TCP protocol. Each entry must be either an integer + or a range. If not specified, this rule applies to connections through + any port. + - 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].' + returned: success + type: list + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + denied: + description: + - The list of DENY rules specified by this firewall. Each rule specifies a protocol + and port-range tuple that describes a denied connection. + returned: success + type: complex + contains: + ip_protocol: + description: + - The IP protocol to which this rule applies. The protocol type is required + when creating a firewall rule. This value can either be one of the following + well known protocol strings (tcp, udp, icmp, esp, ah, sctp, ipip, all), + or the IP protocol number. + returned: success + type: str + ports: + description: + - An optional list of ports to which this rule applies. This field is only + applicable for UDP or TCP protocol. Each entry must be either an integer + or a range. If not specified, this rule applies to connections through + any port. + - 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].' + returned: success + type: list + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + destinationRanges: + description: + - If destination ranges are specified, the firewall will apply only to traffic + that has destination IP address in these ranges. These ranges must be expressed + in CIDR format. Only IPv4 is supported. + returned: success + type: list + direction: + description: + - 'Direction of traffic to which this firewall applies; default is INGRESS. 
+ Note: For INGRESS traffic, it is NOT supported to specify destinationRanges; + For EGRESS traffic, it is NOT supported to specify sourceRanges OR sourceTags.' + returned: success + type: str + disabled: + description: + - Denotes whether the firewall rule is disabled, i.e not applied to the network + it is associated with. When set to true, the firewall rule is not enforced + and the network behaves as if it did not exist. If this is unspecified, the + firewall rule will be enabled. + returned: success + type: bool + logConfig: + description: + - This field denotes the logging options for a particular firewall rule. + - If logging is enabled, logs will be exported to Cloud Logging. + returned: success + type: complex + contains: + enable: + description: + - This field denotes whether to enable logging for a particular firewall + rule. If logging is enabled, logs will be exported to Stackdriver. + returned: success + type: bool + metadata: + description: + - This field denotes whether to include or exclude metadata for firewall + logs. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + network: + description: + - 'URL of the network resource for this firewall rule. If not specified when + creating a firewall rule, the default network is used: global/networks/default + If you choose to specify this property, you can specify the network as a full + or partial URL. 
For example, the following are all valid URLs: U(https://www.googleapis.com/compute/v1/projects/myproject/global/) + networks/my-network projects/myproject/global/networks/my-network global/networks/default + .' + returned: success + type: dict + priority: + description: + - Priority for this rule. This is an integer between 0 and 65535, both inclusive. + When not specified, the value assumed is 1000. Relative priorities determine + precedence of conflicting rules. Lower value of priority implies higher precedence + (eg, a rule with priority 0 has higher precedence than a rule with priority + 1). DENY rules take precedence over ALLOW rules having equal priority. + returned: success + type: int + sourceRanges: + description: + - If source ranges are specified, the firewall will apply only to traffic that + has source IP address in these ranges. These ranges must be expressed in CIDR + format. One or both of sourceRanges and sourceTags may be set. If both properties + are set, the firewall will apply to traffic that has source IP address within + sourceRanges OR the source IP that belongs to a tag listed in the sourceTags + property. The connection does not need to match both properties for the firewall + to apply. Only IPv4 is supported. + returned: success + type: list + sourceServiceAccounts: + description: + - If source service accounts are specified, the firewall will apply only to + traffic originating from an instance with a service account in this list. + Source service accounts cannot be used to control traffic to an instance's + external IP address because service accounts are associated with an instance, + not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. + If both are set, the firewall will apply to traffic that has source IP address + within sourceRanges OR the source IP belongs to an instance with service account + listed in sourceServiceAccount. 
The connection does not need to match both + properties for the firewall to apply. sourceServiceAccounts cannot be used + at the same time as sourceTags or targetTags. + returned: success + type: list + sourceTags: + description: + - If source tags are specified, the firewall will apply only to traffic with + source IP that belongs to a tag listed in source tags. Source tags cannot + be used to control traffic to an instance's external IP address. Because tags + are associated with an instance, not an IP address. One or both of sourceRanges + and sourceTags may be set. If both properties are set, the firewall will apply + to traffic that has source IP address within sourceRanges OR the source IP + that belongs to a tag listed in the sourceTags property. The connection does + not need to match both properties for the firewall to apply. + returned: success + type: list + targetServiceAccounts: + description: + - A list of service accounts indicating sets of instances located in the network + that may make network connections as specified in allowed[]. + - targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. + If neither targetServiceAccounts nor targetTags are specified, the firewall + rule applies to all instances on the specified network. + returned: success + type: list + targetTags: + description: + - A list of instance tags indicating sets of instances located in the network + that may make network connections as specified in allowed[]. + - If no targetTags are specified, the firewall rule applies to all instances + on the specified network. 
+ returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/firewalls".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_forwarding_rule.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_forwarding_rule.py new file mode 100644 index 000000000..f1c13cc8b --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_forwarding_rule.py @@ -0,0 +1,739 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_forwarding_rule +description: +- A ForwardingRule resource. 
A ForwardingRule resource specifies which pool of target + virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, + portRange] tuple. +short_description: Creates a GCP ForwardingRule +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + is_mirroring_collector: + description: + - Indicates whether or not this load balancer can be used as a collector for packet + mirroring. To prevent mirroring loops, instances behind this load balancer will + not have their traffic mirrored even if a PacketMirroring rule applies to them. + This can only be set to true for load balancers that have their loadBalancingScheme + set to INTERNAL. + required: false + type: bool + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + ip_address: + description: + - The IP address that this forwarding rule is serving on behalf of. + - Addresses are restricted based on the forwarding rule's load balancing scheme + (EXTERNAL or INTERNAL) and scope (global or regional). + - When the load balancing scheme is EXTERNAL, for global forwarding rules, the + address must be a global IP, and for regional forwarding rules, the address + must live in the same region as the forwarding rule. If this field is empty, + an ephemeral IPv4 address from the same scope (global or regional) will be assigned. + A regional forwarding rule supports IPv4 only. A global forwarding rule supports + either IPv4 or IPv6. + - When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP + address belonging to the network/subnet configured for the forwarding rule. 
+ By default, if this field is empty, an ephemeral internal IP address will be + automatically allocated from the IP range of the subnet or network configured + for this forwarding rule. + - 'An address can be specified either by a literal IP address or a URL reference + to an existing Address resource. The following examples are all valid: * 100.1.2.3 + * U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address) + * projects/project/regions/region/addresses/address * regions/region/addresses/address + * global/addresses/address * address .' + required: false + type: str + ip_protocol: + description: + - The IP protocol to which this rule applies. + - When the load balancing scheme is INTERNAL, only TCP and UDP are valid. + - 'Some valid choices include: "TCP", "UDP", "ESP", "AH", "SCTP", "ICMP"' + required: false + type: str + backend_service: + description: + - A BackendService to receive the matched traffic. This is used only for INTERNAL + load balancing. + - 'This field represents a link to a BackendService resource in GCP. It can be + specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_backend_service task and then set this backend_service + field to "{{ name-of-resource }}"' + required: false + type: dict + load_balancing_scheme: + description: + - This signifies what the ForwardingRule will be used for and can be EXTERNAL, + INTERNAL, or INTERNAL_MANAGED. EXTERNAL is used for Classic Cloud VPN gateways, + protocol forwarding to VMs from an external IP address, and HTTP(S), SSL Proxy, + TCP Proxy, and Network TCP/UDP load balancers. + - INTERNAL is used for protocol forwarding to VMs from an internal IP address, + and internal TCP/UDP load balancers. + - INTERNAL_MANAGED is used for internal HTTP(S) load balancers. 
+ - 'Some valid choices include: "EXTERNAL", "INTERNAL", "INTERNAL_MANAGED"' + required: false + type: str + name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + network: + description: + - For internal load balancing, this field identifies the network that the load + balanced IP should belong to for this Forwarding Rule. If this field is not + specified, the default network will be used. + - This field is only used for INTERNAL load balancing. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: false + type: dict + port_range: + description: + - This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, + TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. + - Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed + to ports in the specified range will be forwarded to target. + - Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint + port ranges. 
+ - 'Some types of forwarding target have constraints on the acceptable ports: * + TargetHttpProxy: 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, 43, + 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: 25, + 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway: + 500, 4500 .' + required: false + type: str + ports: + description: + - This field is used along with the backend_service field for internal load balancing. + - When the load balancing scheme is INTERNAL, a single port or a comma separated + list of ports can be configured. Only packets addressed to these ports will + be forwarded to the backends configured with this forwarding rule. + - You may specify a maximum of up to 5 ports. + elements: str + required: false + type: list + subnetwork: + description: + - The subnetwork that the load balanced IP should belong to for this Forwarding + Rule. This field is only used for INTERNAL load balancing. + - If the network specified is in auto subnet mode, this field is optional. However, + if the network is in custom subnet mode, a subnetwork must be specified. + - 'This field represents a link to a Subnetwork resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_subnetwork task and then set this subnetwork field to "{{ name-of-resource + }}"' + required: false + type: dict + target: + description: + - The URL of the target resource to receive the matched traffic. + - The target must live in the same region as the forwarding rule. + - The forwarded traffic must be of a type appropriate to the target object. + required: false + type: str + allow_global_access: + description: + - If true, clients can access ILB from all regions. + - Otherwise only allows from the local region the ILB is located at. 
+ required: false + type: bool + all_ports: + description: + - For internal TCP/UDP load balancing (i.e. load balancing scheme is INTERNAL + and protocol is TCP/UDP), set this to true to allow packets addressed to any + ports to be forwarded to the backends configured with this forwarding rule. + Used with backend service. Cannot be set if port or portRange are set. + required: false + type: bool + network_tier: + description: + - The networking tier used for configuring this address. If this field is not + specified, it is assumed to be PREMIUM. + - 'Some valid choices include: "PREMIUM", "STANDARD"' + required: false + type: str + service_label: + description: + - An optional prefix to the service name for this Forwarding Rule. + - If specified, will be the first label of the fully qualified service name. + - The label must be 1-63 characters long, and comply with RFC1035. + - Specifically, the label must be 1-63 characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase + letter, and all following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + - This field is only used for INTERNAL load balancing. + required: false + type: str + region: + description: + - A reference to the region where the regional forwarding rule resides. + - This field is not applicable to global forwarding rules. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/forwardingRules)' +- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a address + google.cloud.gcp_compute_address: + name: address-forwardingrule + region: us-west1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: address + +- name: create a target pool + google.cloud.gcp_compute_target_pool: + name: targetpool-forwardingrule + region: us-west1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: targetpool + +- name: create a forwarding rule + google.cloud.gcp_compute_forwarding_rule: + name: test_object + region: us-west1 + target: "{{ targetpool }}" + ip_protocol: TCP + port_range: 80-80 + ip_address: "{{ address.address }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +isMirroringCollector: + description: + - Indicates whether or not this load balancer can be used as a collector for packet + mirroring. To prevent mirroring loops, instances behind this load balancer will + not have their traffic mirrored even if a PacketMirroring rule applies to them. + This can only be set to true for load balancers that have their loadBalancingScheme + set to INTERNAL. + returned: success + type: bool +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +IPAddress: + description: + - The IP address that this forwarding rule is serving on behalf of. + - Addresses are restricted based on the forwarding rule's load balancing scheme + (EXTERNAL or INTERNAL) and scope (global or regional). 
+ - When the load balancing scheme is EXTERNAL, for global forwarding rules, the address + must be a global IP, and for regional forwarding rules, the address must live + in the same region as the forwarding rule. If this field is empty, an ephemeral + IPv4 address from the same scope (global or regional) will be assigned. A regional + forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 + or IPv6. + - When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address + belonging to the network/subnet configured for the forwarding rule. By default, + if this field is empty, an ephemeral internal IP address will be automatically + allocated from the IP range of the subnet or network configured for this forwarding + rule. + - 'An address can be specified either by a literal IP address or a URL reference + to an existing Address resource. The following examples are all valid: * 100.1.2.3 + * U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address) + * projects/project/regions/region/addresses/address * regions/region/addresses/address + * global/addresses/address * address .' + returned: success + type: str +IPProtocol: + description: + - The IP protocol to which this rule applies. + - When the load balancing scheme is INTERNAL, only TCP and UDP are valid. + returned: success + type: str +backendService: + description: + - A BackendService to receive the matched traffic. This is used only for INTERNAL + load balancing. + returned: success + type: dict +loadBalancingScheme: + description: + - This signifies what the ForwardingRule will be used for and can be EXTERNAL, INTERNAL, + or INTERNAL_MANAGED. EXTERNAL is used for Classic Cloud VPN gateways, protocol + forwarding to VMs from an external IP address, and HTTP(S), SSL Proxy, TCP Proxy, + and Network TCP/UDP load balancers. 
+ - INTERNAL is used for protocol forwarding to VMs from an internal IP address, and + internal TCP/UDP load balancers. + - INTERNAL_MANAGED is used for internal HTTP(S) load balancers. + returned: success + type: str +name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +network: + description: + - For internal load balancing, this field identifies the network that the load balanced + IP should belong to for this Forwarding Rule. If this field is not specified, + the default network will be used. + - This field is only used for INTERNAL load balancing. + returned: success + type: dict +portRange: + description: + - This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, + TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. + - Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to + ports in the specified range will be forwarded to target. + - Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint + port ranges. + - 'Some types of forwarding target have constraints on the acceptable ports: * TargetHttpProxy: + 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, 43, 110, 143, 195, 443, + 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: 25, 43, 110, 143, 195, 443, + 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway: 500, 4500 .' + returned: success + type: str +ports: + description: + - This field is used along with the backend_service field for internal load balancing. 
+ - When the load balancing scheme is INTERNAL, a single port or a comma separated + list of ports can be configured. Only packets addressed to these ports will be + forwarded to the backends configured with this forwarding rule. + - You may specify a maximum of up to 5 ports. + returned: success + type: list +subnetwork: + description: + - The subnetwork that the load balanced IP should belong to for this Forwarding + Rule. This field is only used for INTERNAL load balancing. + - If the network specified is in auto subnet mode, this field is optional. However, + if the network is in custom subnet mode, a subnetwork must be specified. + returned: success + type: dict +target: + description: + - The URL of the target resource to receive the matched traffic. + - The target must live in the same region as the forwarding rule. + - The forwarded traffic must be of a type appropriate to the target object. + returned: success + type: str +allowGlobalAccess: + description: + - If true, clients can access ILB from all regions. + - Otherwise only allows from the local region the ILB is located at. + returned: success + type: bool +allPorts: + description: + - For internal TCP/UDP load balancing (i.e. load balancing scheme is INTERNAL and + protocol is TCP/UDP), set this to true to allow packets addressed to any ports + to be forwarded to the backends configured with this forwarding rule. Used with + backend service. Cannot be set if port or portRange are set. + returned: success + type: bool +networkTier: + description: + - The networking tier used for configuring this address. If this field is not specified, + it is assumed to be PREMIUM. + returned: success + type: str +serviceLabel: + description: + - An optional prefix to the service name for this Forwarding Rule. + - If specified, will be the first label of the fully qualified service name. + - The label must be 1-63 characters long, and comply with RFC1035. 
+  - Specifically, the label must be 1-63 characters long and match the regular expression
+    `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase
+    letter, and all following characters must be a dash, lowercase letter, or digit,
+    except the last character, which cannot be a dash.
+  - This field is only used for INTERNAL load balancing.
+  returned: success
+  type: str
+serviceName:
+  description:
+  - The internal fully qualified service name for this Forwarding Rule.
+  - This field is only used for INTERNAL load balancing.
+  returned: success
+  type: str
+region:
+  description:
+  - A reference to the region where the regional forwarding rule resides.
+  - This field is not applicable to global forwarding rules.
+  returned: success
+  type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
+import json
+import time
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    """Declarative entry point: converge a regional forwarding rule.
+
+    Fetches the current resource, then creates, updates, or deletes it so
+    that it matches the requested ``state`` and parameters.
+    """
+
+    module = GcpModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            is_mirroring_collector=dict(type='bool'),
+            description=dict(type='str'),
+            ip_address=dict(type='str'),
+            ip_protocol=dict(type='str'),
+            backend_service=dict(type='dict'),
+            load_balancing_scheme=dict(type='str'),
+            name=dict(required=True, type='str'),
+            network=dict(type='dict'),
+            port_range=dict(type='str'),
+            ports=dict(type='list', elements='str'),
+            subnetwork=dict(type='dict'),
+            target=dict(type='str'),
+            allow_global_access=dict(type='bool'),
+            all_ports=dict(type='bool'),
+            network_tier=dict(type='str'),
+            service_label=dict(type='str'),
+            region=dict(required=True, type='str'),
+        )
+    )
+
+    # Default OAuth scope when the user supplied none.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
+
+    state = module.params['state']
+    kind = 'compute#forwardingRule'
+
+    fetch = fetch_resource(module, self_link(module), kind)
+    changed = False
+
+    if fetch:
+        if state == 'present':
+            if is_different(module, fetch):
+                update(module, self_link(module), kind, fetch)
+                fetch = fetch_resource(module, self_link(module), kind)
+                changed = True
+        else:
+            delete(module, self_link(module), kind)
+            fetch = {}
+            changed = True
+    else:
+        if state == 'present':
+            fetch = create(module, collection(module), kind)
+            changed = True
+        else:
+            fetch = {}
+
+    fetch.update({'changed': changed})
+
+    module.exit_json(**fetch)
+
+
+def create(module, link, kind):
+    """POST the resource and block until the async operation completes."""
+    auth = GcpSession(module, 'compute')
+    return wait_for_operation(module, auth.post(link, resource_to_request(module)))
+
+
+def update(module, link, kind, fetch):
+    """Apply the supported field-level updates, then re-read the resource."""
+    update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
+    return fetch_resource(module, self_link(module), kind)
+
+
+def update_fields(module, request, response):
+    # Only 'target' and 'allowGlobalAccess' are handled here; each one has
+    # its own dedicated API call below. All other diffs are left alone.
+    if response.get('target') != request.get('target'):
+        target_update(module, request, response)
+    if response.get('allowGlobalAccess') != request.get('allowGlobalAccess'):
+        allow_global_access_update(module, request, response)
+
+
+def target_update(module, request, response):
+    """Repoint the rule via the dedicated setTarget sub-endpoint."""
+    auth = GcpSession(module, 'compute')
+    auth.post(
+        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/forwardingRules/{name}/setTarget"]).format(**module.params),
+        {u'target': module.params.get('target')},
+    )
+
+
+def allow_global_access_update(module, request, response):
+    """PATCH the allowGlobalAccess flag on the rule itself."""
+    auth = GcpSession(module, 'compute')
+    auth.patch(
+        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/forwardingRules/{name}"]).format(**module.params),
+        {u'allowGlobalAccess': module.params.get('allow_global_access')},
+    )
+
+
+def delete(module, link, kind):
+    """DELETE the resource and block until the async operation completes."""
+    auth = GcpSession(module, 'compute')
+    return wait_for_operation(module, auth.delete(link))
+
+
+def resource_to_request(module):
+    """Map module params to API field names, dropping empty values.
+
+    The ``v or v is False`` filter deliberately keeps explicit False values
+    so boolean flags set to false still reach the API.
+    """
+    request = {
+        u'kind': 'compute#forwardingRule',
+        u'isMirroringCollector': module.params.get('is_mirroring_collector'),
+        u'description': module.params.get('description'),
+        u'IPAddress': module.params.get('ip_address'),
+        u'IPProtocol': module.params.get('ip_protocol'),
+        u'backendService': replace_resource_dict(module.params.get(u'backend_service', {}), 'selfLink'),
+        u'loadBalancingScheme': module.params.get('load_balancing_scheme'),
+        u'name': module.params.get('name'),
+        u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
+        u'portRange': module.params.get('port_range'),
+        u'ports': module.params.get('ports'),
+        u'subnetwork': replace_resource_dict(module.params.get(u'subnetwork', {}), 'selfLink'),
+        u'target': module.params.get('target'),
+        u'allowGlobalAccess': module.params.get('allow_global_access'),
+        u'allPorts': module.params.get('all_ports'),
+        u'networkTier': module.params.get('network_tier'),
+        u'serviceLabel': module.params.get('service_label'),
+    }
+    return_vals = {}
+    for k, v in request.items():
+        if v or v is False:
+            return_vals[k] = v
+
+    return return_vals
+
+
+def fetch_resource(module, link, kind, allow_not_found=True):
+    """GET the resource; returns None on 404 when allow_not_found is True."""
+    auth = GcpSession(module, 'compute')
+    return return_if_object(module, auth.get(link), kind, allow_not_found)
+
+
+def self_link(module):
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/forwardingRules/{name}".format(**module.params)
+
+
+def collection(module):
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/forwardingRules".format(**module.params)
+
+
+def return_if_object(module, response, kind, allow_not_found=False):
+    """Decode a JSON API response, tolerating 404/204, failing on API errors."""
+    # If not found, return nothing.
+    if allow_not_found and response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError):
+        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+def is_different(module, response):
+    """Diff request vs. current state over only the keys they have in common."""
+    request = resource_to_request(module)
+    response = response_to_hash(module, response)
+
+    # Remove all output-only from response.
+    response_vals = {}
+    for k, v in response.items():
+        if k in request:
+            response_vals[k] = v
+
+    request_vals = {}
+    for k, v in request.items():
+        if k in response:
+            request_vals[k] = v
+
+    return GcpRequest(request_vals) != GcpRequest(response_vals)
+
+
+# Remove unnecessary properties from the response.
+# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response):
+    """Strip the API response down to the fields this module manages."""
+    return {
+        u'creationTimestamp': response.get(u'creationTimestamp'),
+        u'isMirroringCollector': response.get(u'isMirroringCollector'),
+        u'description': response.get(u'description'),
+        u'id': response.get(u'id'),
+        u'IPAddress': response.get(u'IPAddress'),
+        u'IPProtocol': response.get(u'IPProtocol'),
+        u'backendService': response.get(u'backendService'),
+        u'loadBalancingScheme': response.get(u'loadBalancingScheme'),
+        u'name': response.get(u'name'),
+        u'network': response.get(u'network'),
+        u'portRange': response.get(u'portRange'),
+        u'ports': response.get(u'ports'),
+        u'subnetwork': response.get(u'subnetwork'),
+        u'target': response.get(u'target'),
+        u'allowGlobalAccess': response.get(u'allowGlobalAccess'),
+        u'allPorts': response.get(u'allPorts'),
+        # NOTE(review): unlike every other field, networkTier echoes the
+        # module parameter instead of the API response, which suppresses
+        # drift detection for this field — confirm this is the generator's
+        # intent and not a copy/paste slip.
+        u'networkTier': module.params.get('network_tier'),
+        u'serviceLabel': response.get(u'serviceLabel'),
+        u'serviceName': response.get(u'serviceName'),
+    }
+
+
+def async_op_url(module, extra_data=None):
+    """Build the regional Operations URL for polling an async operation."""
+    if extra_data is None:
+        extra_data = {}
+    url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
+    combined = extra_data.copy()
+    combined.update(module.params)
+    return url.format(**combined)
+
+
+def wait_for_operation(module, response):
+    """Wait for the operation in `response` to finish, then fetch its target."""
+    op_result = return_if_object(module, response, 'compute#operation')
+    if op_result is None:
+        return {}
+    status = navigate_hash(op_result, ['status'])
+    wait_done = wait_for_completion(status, op_result, module)
+    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#forwardingRule')
+
+
+def wait_for_completion(status, op_result, module):
+    """Poll the operation once per second until its status is DONE."""
+    op_id = navigate_hash(op_result, ['name'])
+    op_uri = async_op_url(module, {'op_id': op_id})
+    while status != 'DONE':
+        # Abort immediately if the operation reports errors.
+        raise_if_errors(op_result, ['error', 'errors'], module)
+        time.sleep(1.0)
+        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
+        status = navigate_hash(op_result, ['status'])
+    return op_result
+
+
+def raise_if_errors(response, err_path, module):
+    """fail_json with the operation's error list, if any."""
+    errors = navigate_hash(response, err_path)
+    if errors is not None:
+        module.fail_json(msg=errors)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_forwarding_rule_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_forwarding_rule_info.py
new file mode 100644
index 000000000..f13135d54
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_forwarding_rule_info.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_forwarding_rule_info
+description:
+- Gather info for GCP ForwardingRule
+short_description: Gather info for GCP ForwardingRule
+author: Google Inc. 
(@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2) .
+    type: list
+    elements: str
+  region:
+    description:
+    - A reference to the region where the regional forwarding rule resides.
+    - This field is not applicable to global forwarding rules.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used
+    type: list
+    elements: str
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a forwarding rule + gcp_compute_forwarding_rule_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + isMirroringCollector: + description: + - Indicates whether or not this load balancer can be used as a collector for + packet mirroring. To prevent mirroring loops, instances behind this load balancer + will not have their traffic mirrored even if a PacketMirroring rule applies + to them. This can only be set to true for load balancers that have their loadBalancingScheme + set to INTERNAL. + returned: success + type: bool + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + IPAddress: + description: + - The IP address that this forwarding rule is serving on behalf of. + - Addresses are restricted based on the forwarding rule's load balancing scheme + (EXTERNAL or INTERNAL) and scope (global or regional). + - When the load balancing scheme is EXTERNAL, for global forwarding rules, the + address must be a global IP, and for regional forwarding rules, the address + must live in the same region as the forwarding rule. 
If this field is empty, + an ephemeral IPv4 address from the same scope (global or regional) will be + assigned. A regional forwarding rule supports IPv4 only. A global forwarding + rule supports either IPv4 or IPv6. + - When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP + address belonging to the network/subnet configured for the forwarding rule. + By default, if this field is empty, an ephemeral internal IP address will + be automatically allocated from the IP range of the subnet or network configured + for this forwarding rule. + - 'An address can be specified either by a literal IP address or a URL reference + to an existing Address resource. The following examples are all valid: * 100.1.2.3 + * U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address) + * projects/project/regions/region/addresses/address * regions/region/addresses/address + * global/addresses/address * address .' + returned: success + type: str + IPProtocol: + description: + - The IP protocol to which this rule applies. + - When the load balancing scheme is INTERNAL, only TCP and UDP are valid. + returned: success + type: str + backendService: + description: + - A BackendService to receive the matched traffic. This is used only for INTERNAL + load balancing. + returned: success + type: dict + loadBalancingScheme: + description: + - This signifies what the ForwardingRule will be used for and can be EXTERNAL, + INTERNAL, or INTERNAL_MANAGED. EXTERNAL is used for Classic Cloud VPN gateways, + protocol forwarding to VMs from an external IP address, and HTTP(S), SSL Proxy, + TCP Proxy, and Network TCP/UDP load balancers. + - INTERNAL is used for protocol forwarding to VMs from an internal IP address, + and internal TCP/UDP load balancers. + - INTERNAL_MANAGED is used for internal HTTP(S) load balancers. + returned: success + type: str + name: + description: + - Name of the resource; provided by the client when the resource is created. 
+ The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + network: + description: + - For internal load balancing, this field identifies the network that the load + balanced IP should belong to for this Forwarding Rule. If this field is not + specified, the default network will be used. + - This field is only used for INTERNAL load balancing. + returned: success + type: dict + portRange: + description: + - This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, + TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. + - Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed + to ports in the specified range will be forwarded to target. + - Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint + port ranges. + - 'Some types of forwarding target have constraints on the acceptable ports: + * TargetHttpProxy: 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, + 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: + 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway: + 500, 4500 .' + returned: success + type: str + ports: + description: + - This field is used along with the backend_service field for internal load + balancing. + - When the load balancing scheme is INTERNAL, a single port or a comma separated + list of ports can be configured. Only packets addressed to these ports will + be forwarded to the backends configured with this forwarding rule. + - You may specify a maximum of up to 5 ports. 
+ returned: success + type: list + subnetwork: + description: + - The subnetwork that the load balanced IP should belong to for this Forwarding + Rule. This field is only used for INTERNAL load balancing. + - If the network specified is in auto subnet mode, this field is optional. However, + if the network is in custom subnet mode, a subnetwork must be specified. + returned: success + type: dict + target: + description: + - The URL of the target resource to receive the matched traffic. + - The target must live in the same region as the forwarding rule. + - The forwarded traffic must be of a type appropriate to the target object. + returned: success + type: str + allowGlobalAccess: + description: + - If true, clients can access ILB from all regions. + - Otherwise only allows from the local region the ILB is located at. + returned: success + type: bool + allPorts: + description: + - For internal TCP/UDP load balancing (i.e. load balancing scheme is INTERNAL + and protocol is TCP/UDP), set this to true to allow packets addressed to any + ports to be forwarded to the backends configured with this forwarding rule. + Used with backend service. Cannot be set if port or portRange are set. + returned: success + type: bool + networkTier: + description: + - The networking tier used for configuring this address. If this field is not + specified, it is assumed to be PREMIUM. + returned: success + type: str + serviceLabel: + description: + - An optional prefix to the service name for this Forwarding Rule. + - If specified, will be the first label of the fully qualified service name. + - The label must be 1-63 characters long, and comply with RFC1035. + - Specifically, the label must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must + be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. 
+      - This field is only used for INTERNAL load balancing.
+      returned: success
+      type: str
+    serviceName:
+      description:
+      - The internal fully qualified service name for this Forwarding Rule.
+      - This field is only used for INTERNAL load balancing.
+      returned: success
+      type: str
+    region:
+      description:
+      - A reference to the region where the regional forwarding rule resides.
+      - This field is not applicable to global forwarding rules.
+      returned: success
+      type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
+import json
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    """List regional forwarding rules matching the optional filters."""
+    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str')))
+
+    # Default OAuth scope when the user supplied none.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
+
+    return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
+    module.exit_json(**return_value)
+
+
+def collection(module):
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/forwardingRules".format(**module.params)
+
+
+def fetch_list(module, link, query):
+    """Page through the collection, applying the server-side filter string."""
+    auth = GcpSession(module, 'compute')
+    return auth.list(link, return_if_object, array_name='items', params={'filter': query})
+
+
+def query_options(filters):
+    """Combine the user's filter list into one space-joined filter string."""
+    if not filters:
+        return ''
+
+    if len(filters) == 1:
+        return filters[0]
+    else:
+        queries = []
+        for f in filters:
+            # For multiple queries, all queries should have ()
+            # NOTE(review): this wraps a term only when BOTH its first and
+            # last characters lack parentheses, so e.g. "name = (x)" stays
+            # unwrapped; also ''.join(f) is a no-op on a string. Confirm
+            # against the Magic Modules generator before changing.
+            if f[0] != '(' and f[-1] != ')':
+                queries.append("(%s)" % ''.join(f))
+            else:
+                queries.append(f)
+
+        return ' '.join(queries)
+
+
+def return_if_object(module, response):
+    """Decode a JSON API response, tolerating 404/204, failing on API errors."""
+    # If not found, return nothing.
+    if response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_address.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_address.py
new file mode 100644
index 000000000..d1f02bdff
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_address.py
@@ -0,0 +1,462 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_global_address +description: +- Represents a Global Address resource. Global addresses are used for HTTP(S) load + balancing. +short_description: Creates a GCP GlobalAddress +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + address: + description: + - The static external IP address represented by this resource. + required: false + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + ip_version: + description: + - The IP Version that will be used by this address. The default value is `IPV4`. 
+ - 'Some valid choices include: "IPV4", "IPV6"' + required: false + type: str + prefix_length: + description: + - The prefix length of the IP range. If not present, it means the address field + is a single IP address. + - This field is not applicable to addresses with addressType=EXTERNAL, or addressType=INTERNAL + when purpose=PRIVATE_SERVICE_CONNECT . + required: false + type: int + address_type: + description: + - The type of the address to reserve. + - "* EXTERNAL indicates public/external single IP address." + - "* INTERNAL indicates internal IP ranges belonging to some network." + - 'Some valid choices include: "EXTERNAL", "INTERNAL"' + required: false + default: EXTERNAL + type: str + purpose: + description: + - 'The purpose of the resource. Possible values include: * VPC_PEERING - for peer + networks * PRIVATE_SERVICE_CONNECT - for ([Beta](U(https://terraform.io/docs/providers/google/guides/provider_versions.html)) + only) Private Service Connect networks .' + required: false + type: str + network: + description: + - The URL of the network in which to reserve the IP range. The IP range must be + in RFC1918 space. The network cannot be deleted if there are any reserved IP + ranges referring to it. + - This should only be set when using an Internal address. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: false + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/globalAddresses)' +- 'Reserving a Static External IP Address: U(https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a global address + google.cloud.gcp_compute_global_address: + name: test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +address: + description: + - The static external IP address represented by this resource. + returned: success + type: str +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +ipVersion: + description: + - The IP Version that will be used by this address. The default value is `IPV4`. + returned: success + type: str +region: + description: + - A reference to the region where the regional address resides. + returned: success + type: str +prefixLength: + description: + - The prefix length of the IP range. If not present, it means the address field + is a single IP address. + - This field is not applicable to addresses with addressType=EXTERNAL, or addressType=INTERNAL + when purpose=PRIVATE_SERVICE_CONNECT . + returned: success + type: int +addressType: + description: + - The type of the address to reserve. + - "* EXTERNAL indicates public/external single IP address." 
+ - "* INTERNAL indicates internal IP ranges belonging to some network." + returned: success + type: str +purpose: + description: + - 'The purpose of the resource. Possible values include: * VPC_PEERING - for peer + networks * PRIVATE_SERVICE_CONNECT - for ([Beta](U(https://terraform.io/docs/providers/google/guides/provider_versions.html)) + only) Private Service Connect networks .' + returned: success + type: str +network: + description: + - The URL of the network in which to reserve the IP range. The IP range must be + in RFC1918 space. The network cannot be deleted if there are any reserved IP ranges + referring to it. + - This should only be set when using an Internal address. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + address=dict(type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + ip_version=dict(type='str'), + prefix_length=dict(type='int'), + address_type=dict(default='EXTERNAL', type='str'), + purpose=dict(type='str'), + network=dict(type='dict'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#address' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + 
if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), kind) + create(module, collection(module), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#address', + u'address': module.params.get('address'), + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'ipVersion': module.params.get('ip_version'), + u'prefixLength': module.params.get('prefix_length'), + u'addressType': module.params.get('address_type'), + u'purpose': module.params.get('purpose'), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/addresses/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/addresses".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # 
If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'address': response.get(u'address'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'id': response.get(u'id'), + u'name': response.get(u'name'), + u'ipVersion': response.get(u'ipVersion'), + u'region': response.get(u'region'), + u'prefixLength': response.get(u'prefixLength'), + u'addressType': response.get(u'addressType'), + u'purpose': response.get(u'purpose'), + u'network': response.get(u'network'), + } + + +def region_selflink(name, params): + if name is None: + return + url = r"https://compute.googleapis.com/compute/v1/projects/.*/regions/.*" + if not re.match(url, name): + name = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name + return name + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#address') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + 
module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_address_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_address_info.py new file mode 100644 index 000000000..a6cc0b882 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_address_info.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_global_address_info +description: +- Gather info for GCP GlobalAddress +short_description: Gather info for GCP GlobalAddress +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). 
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+    elements: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used
+    type: list
+    elements: str
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+''' + +EXAMPLES = ''' +- name: get info on a global address + gcp_compute_global_address_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + address: + description: + - The static external IP address represented by this resource. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + ipVersion: + description: + - The IP Version that will be used by this address. The default value is `IPV4`. + returned: success + type: str + region: + description: + - A reference to the region where the regional address resides. + returned: success + type: str + prefixLength: + description: + - The prefix length of the IP range. If not present, it means the address field + is a single IP address. + - This field is not applicable to addresses with addressType=EXTERNAL, or addressType=INTERNAL + when purpose=PRIVATE_SERVICE_CONNECT . + returned: success + type: int + addressType: + description: + - The type of the address to reserve. 
+ - "* EXTERNAL indicates public/external single IP address." + - "* INTERNAL indicates internal IP ranges belonging to some network." + returned: success + type: str + purpose: + description: + - 'The purpose of the resource. Possible values include: * VPC_PEERING - for + peer networks * PRIVATE_SERVICE_CONNECT - for ([Beta](U(https://terraform.io/docs/providers/google/guides/provider_versions.html)) + only) Private Service Connect networks .' + returned: success + type: str + network: + description: + - The URL of the network in which to reserve the IP range. The IP range must + be in RFC1918 space. The network cannot be deleted if there are any reserved + IP ranges referring to it. + - This should only be set when using an Internal address. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/addresses".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not 
filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_forwarding_rule.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_forwarding_rule.py new file mode 100644 index 000000000..a9bb647f6 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_forwarding_rule.py @@ -0,0 +1,780 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_global_forwarding_rule +description: +- Represents a GlobalForwardingRule resource. Global forwarding rules are used to + forward traffic to the correct load balancer for HTTP load balancing. Global forwarding + rules can only be used for HTTP load balancing. +- For more information, see U(https://cloud.google.com/compute/docs/load-balancing/http/) + . +short_description: Creates a GCP GlobalForwardingRule +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + ip_address: + description: + - The IP address that this forwarding rule is serving on behalf of. + - Addresses are restricted based on the forwarding rule's load balancing scheme + (EXTERNAL or INTERNAL) and scope (global or regional). + - When the load balancing scheme is EXTERNAL, for global forwarding rules, the + address must be a global IP, and for regional forwarding rules, the address + must live in the same region as the forwarding rule. 
If this field is empty, + an ephemeral IPv4 address from the same scope (global or regional) will be assigned. + A regional forwarding rule supports IPv4 only. A global forwarding rule supports + either IPv4 or IPv6. + - When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP + address belonging to the network/subnet configured for the forwarding rule. + By default, if this field is empty, an ephemeral internal IP address will be + automatically allocated from the IP range of the subnet or network configured + for this forwarding rule. + - 'An address can be specified either by a literal IP address or a URL reference + to an existing Address resource. The following examples are all valid: * 100.1.2.3 + * U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address) + * projects/project/regions/region/addresses/address * regions/region/addresses/address + * global/addresses/address * address .' + required: false + type: str + ip_protocol: + description: + - The IP protocol to which this rule applies. When the load balancing scheme is + INTERNAL_SELF_MANAGED, only TCP is valid. This field must not be set if the + global address is configured as a purpose of PRIVATE_SERVICE_CONNECT and addressType + of INTERNAL . + - 'Some valid choices include: "TCP", "UDP", "ESP", "AH", "SCTP", "ICMP"' + required: false + type: str + ip_version: + description: + - The IP Version that will be used by this global forwarding rule. + - 'Some valid choices include: "IPV4", "IPV6"' + required: false + type: str + load_balancing_scheme: + description: + - This signifies what the GlobalForwardingRule will be used for. + - 'The value of INTERNAL_SELF_MANAGED means that this will be used for Internal + Global HTTP(S) LB. 
The value of EXTERNAL means that this will be used for External + Global Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy) ([Beta](U(https://terraform.io/docs/providers/google/guides/provider_versions.html)) + only) Note: This field must be set "" if the global address is configured as + a purpose of PRIVATE_SERVICE_CONNECT and addressType of INTERNAL.' + - 'Some valid choices include: "EXTERNAL", "INTERNAL_SELF_MANAGED"' + required: false + default: EXTERNAL + type: str + metadata_filters: + description: + - Opaque filter criteria used by Loadbalancer to restrict routing configuration + to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, + xDS clients present node metadata. If a match takes place, the relevant routing + configuration is made available to those proxies. + - For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, + at least one of the filterLabels must match the corresponding label provided + in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of + its filterLabels must match with corresponding labels in the provided metadata. + - metadataFilters specified here can be overridden by those specified in the UrlMap + that this ForwardingRule references. + - metadataFilters only applies to Loadbalancers that have their loadBalancingScheme + set to INTERNAL_SELF_MANAGED. + elements: dict + required: false + type: list + suboptions: + filter_match_criteria: + description: + - Specifies how individual filterLabel matches within the list of filterLabels + contribute towards the overall metadataFilter match. + - MATCH_ANY - At least one of the filterLabels must have a matching label + in the provided metadata. + - MATCH_ALL - All filterLabels must have matching labels in the provided metadata. 
+ - 'Some valid choices include: "MATCH_ANY", "MATCH_ALL"' + required: true + type: str + filter_labels: + description: + - The list of label value pairs that must match labels in the provided metadata + based on filterMatchCriteria This list must not be empty and can have at + the most 64 entries. + elements: dict + required: true + type: list + suboptions: + name: + description: + - Name of the metadata label. The length must be between 1 and 1024 characters, + inclusive. + required: true + type: str + value: + description: + - The value that the label must match. The value has a maximum length + of 1024 characters. + required: true + type: str + name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + network: + description: + - This field is not used for external load balancing. + - For INTERNAL_SELF_MANAGED load balancing, this field identifies the network + that the load balanced IP should belong to for this global forwarding rule. + If this field is not specified, the default network will be used. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: false + type: dict + port_range: + description: + - This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, + TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. + - Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed + to ports in the specified range will be forwarded to target. + - Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint + port ranges. + - 'Some types of forwarding target have constraints on the acceptable ports: * + TargetHttpProxy: 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, 43, + 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: 25, + 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway: + 500, 4500 .' + required: false + type: str + target: + description: + - The URL of the target resource to receive the matched traffic. + - The forwarded traffic must be of a type appropriate to the target object. + - For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are valid. + - ([Beta](U(https://terraform.io/docs/providers/google/guides/provider_versions.html)) + only) For global address with a purpose of PRIVATE_SERVICE_CONNECT and addressType + of INTERNAL, only "all-apis" and "vpc-sc" are valid. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a global address + google.cloud.gcp_compute_global_address: + name: globaladdress-globalforwardingrule + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: globaladdress + +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: instancegroup-globalforwardingrule + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancegroup + +- name: create a HTTP health check + google.cloud.gcp_compute_http_health_check: + name: httphealthcheck-globalforwardingrule + healthy_threshold: 10 + port: 8080 + timeout_sec: 2 + unhealthy_threshold: 5 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a backend service + google.cloud.gcp_compute_backend_service: + name: backendservice-globalforwardingrule + backends: + - group: "{{ instancegroup.selfLink }}" + health_checks: + - "{{ healthcheck.selfLink }}" + enable_cdn: 'true' + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + 
register: backendservice + +- name: create a URL map + google.cloud.gcp_compute_url_map: + name: urlmap-globalforwardingrule + default_service: "{{ backendservice }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: urlmap + +- name: create a target HTTP proxy + google.cloud.gcp_compute_target_http_proxy: + name: targethttpproxy-globalforwardingrule + url_map: "{{ urlmap }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: httpproxy + +- name: create a global forwarding rule + google.cloud.gcp_compute_global_forwarding_rule: + name: test_object + ip_address: "{{ globaladdress.address }}" + ip_protocol: TCP + port_range: 80-80 + target: "{{ httpproxy.selfLink }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +IPAddress: + description: + - The IP address that this forwarding rule is serving on behalf of. + - Addresses are restricted based on the forwarding rule's load balancing scheme + (EXTERNAL or INTERNAL) and scope (global or regional). + - When the load balancing scheme is EXTERNAL, for global forwarding rules, the address + must be a global IP, and for regional forwarding rules, the address must live + in the same region as the forwarding rule. If this field is empty, an ephemeral + IPv4 address from the same scope (global or regional) will be assigned. A regional + forwarding rule supports IPv4 only. 
A global forwarding rule supports either IPv4 + or IPv6. + - When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address + belonging to the network/subnet configured for the forwarding rule. By default, + if this field is empty, an ephemeral internal IP address will be automatically + allocated from the IP range of the subnet or network configured for this forwarding + rule. + - 'An address can be specified either by a literal IP address or a URL reference + to an existing Address resource. The following examples are all valid: * 100.1.2.3 + * U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address) + * projects/project/regions/region/addresses/address * regions/region/addresses/address + * global/addresses/address * address .' + returned: success + type: str +IPProtocol: + description: + - The IP protocol to which this rule applies. When the load balancing scheme is + INTERNAL_SELF_MANAGED, only TCP is valid. This field must not be set if the global + address is configured as a purpose of PRIVATE_SERVICE_CONNECT and addressType + of INTERNAL . + returned: success + type: str +ipVersion: + description: + - The IP Version that will be used by this global forwarding rule. + returned: success + type: str +loadBalancingScheme: + description: + - This signifies what the GlobalForwardingRule will be used for. + - 'The value of INTERNAL_SELF_MANAGED means that this will be used for Internal + Global HTTP(S) LB. The value of EXTERNAL means that this will be used for External + Global Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy) ([Beta](U(https://terraform.io/docs/providers/google/guides/provider_versions.html)) + only) Note: This field must be set "" if the global address is configured as a + purpose of PRIVATE_SERVICE_CONNECT and addressType of INTERNAL.' 
+ returned: success + type: str +metadataFilters: + description: + - Opaque filter criteria used by Loadbalancer to restrict routing configuration + to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, + xDS clients present node metadata. If a match takes place, the relevant routing + configuration is made available to those proxies. + - For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, + at least one of the filterLabels must match the corresponding label provided in + the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its + filterLabels must match with corresponding labels in the provided metadata. + - metadataFilters specified here can be overridden by those specified in the UrlMap + that this ForwardingRule references. + - metadataFilters only applies to Loadbalancers that have their loadBalancingScheme + set to INTERNAL_SELF_MANAGED. + returned: success + type: complex + contains: + filterMatchCriteria: + description: + - Specifies how individual filterLabel matches within the list of filterLabels + contribute towards the overall metadataFilter match. + - MATCH_ANY - At least one of the filterLabels must have a matching label in + the provided metadata. + - MATCH_ALL - All filterLabels must have matching labels in the provided metadata. + returned: success + type: str + filterLabels: + description: + - The list of label value pairs that must match labels in the provided metadata + based on filterMatchCriteria This list must not be empty and can have at the + most 64 entries. + returned: success + type: complex + contains: + name: + description: + - Name of the metadata label. The length must be between 1 and 1024 characters, + inclusive. + returned: success + type: str + value: + description: + - The value that the label must match. The value has a maximum length of + 1024 characters. 
+ returned: success + type: str +name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +network: + description: + - This field is not used for external load balancing. + - For INTERNAL_SELF_MANAGED load balancing, this field identifies the network that + the load balanced IP should belong to for this global forwarding rule. If this + field is not specified, the default network will be used. + returned: success + type: dict +portRange: + description: + - This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, + TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. + - Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to + ports in the specified range will be forwarded to target. + - Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint + port ranges. + - 'Some types of forwarding target have constraints on the acceptable ports: * TargetHttpProxy: + 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, 43, 110, 143, 195, 443, + 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: 25, 43, 110, 143, 195, 443, + 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway: 500, 4500 .' + returned: success + type: str +target: + description: + - The URL of the target resource to receive the matched traffic. + - The forwarded traffic must be of a type appropriate to the target object. + - For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are valid. 
+ - ([Beta](U(https://terraform.io/docs/providers/google/guides/provider_versions.html)) + only) For global address with a purpose of PRIVATE_SERVICE_CONNECT and addressType + of INTERNAL, only "all-apis" and "vpc-sc" are valid. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + ip_address=dict(type='str'), + ip_protocol=dict(type='str'), + ip_version=dict(type='str'), + load_balancing_scheme=dict(default='EXTERNAL', type='str'), + metadata_filters=dict( + type='list', + elements='dict', + options=dict( + filter_match_criteria=dict(required=True, type='str'), + filter_labels=dict( + required=True, type='list', elements='dict', options=dict(name=dict(required=True, type='str'), value=dict(required=True, type='str')) + ), + ), + ), + name=dict(required=True, type='str'), + network=dict(type='dict'), + port_range=dict(type='str'), + target=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#forwardingRule' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, 
self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) + return fetch_resource(module, self_link(module), kind) + + +def update_fields(module, request, response): + if response.get('target') != request.get('target'): + target_update(module, request, response) + + +def target_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/forwardingRules/{name}/setTarget"]).format(**module.params), + {u'target': module.params.get('target')}, + ) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#forwardingRule', + u'description': module.params.get('description'), + u'IPAddress': module.params.get('ip_address'), + u'IPProtocol': module.params.get('ip_protocol'), + u'ipVersion': module.params.get('ip_version'), + u'loadBalancingScheme': module.params.get('load_balancing_scheme'), + u'metadataFilters': GlobalForwardingRuleMetadatafiltersArray(module.params.get('metadata_filters', []), module).to_request(), + u'name': module.params.get('name'), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + u'portRange': module.params.get('port_range'), + u'target': module.params.get('target'), 
+ } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/forwardingRules/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/forwardingRules".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'id': response.get(u'id'), + u'IPAddress': response.get(u'IPAddress'), + u'IPProtocol': response.get(u'IPProtocol'), + u'ipVersion': response.get(u'ipVersion'), + u'loadBalancingScheme': response.get(u'loadBalancingScheme'), + u'metadataFilters': GlobalForwardingRuleMetadatafiltersArray(response.get(u'metadataFilters', []), module).from_response(), + u'name': response.get(u'name'), + u'network': response.get(u'network'), + u'portRange': response.get(u'portRange'), + u'target': response.get(u'target'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#forwardingRule') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class GlobalForwardingRuleMetadatafiltersArray(object): + def __init__(self, request, module): + self.module = module + if 
request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'filterMatchCriteria': item.get('filter_match_criteria'), + u'filterLabels': GlobalForwardingRuleFilterlabelsArray(item.get('filter_labels', []), self.module).to_request(), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'filterMatchCriteria': item.get(u'filterMatchCriteria'), + u'filterLabels': GlobalForwardingRuleFilterlabelsArray(item.get(u'filterLabels', []), self.module).from_response(), + } + ) + + +class GlobalForwardingRuleFilterlabelsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'name': item.get('name'), u'value': item.get('value')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'name': item.get(u'name'), u'value': item.get(u'value')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_forwarding_rule_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_forwarding_rule_info.py new file mode 100644 index 000000000..292489cbf --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_global_forwarding_rule_info.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# -*- 
coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_global_forwarding_rule_info
+description:
+- Gather info for GCP GlobalForwardingRule
+short_description: Gather info for GCP GlobalForwardingRule
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+ filters:
+ description:
+ - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+ - Each additional filter in the list will be added as an AND condition (filter1
+ and filter2) .
+ type: list
+ elements: str
+ project:
+ description:
+ - The Google Cloud Platform project to use.
+ type: str
+ auth_kind:
+ description:
+ - The type of credential used.
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a global forwarding rule + gcp_compute_global_forwarding_rule_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + IPAddress: + description: + - The IP address that this forwarding rule is serving on behalf of. + - Addresses are restricted based on the forwarding rule's load balancing scheme + (EXTERNAL or INTERNAL) and scope (global or regional). + - When the load balancing scheme is EXTERNAL, for global forwarding rules, the + address must be a global IP, and for regional forwarding rules, the address + must live in the same region as the forwarding rule. If this field is empty, + an ephemeral IPv4 address from the same scope (global or regional) will be + assigned. A regional forwarding rule supports IPv4 only. A global forwarding + rule supports either IPv4 or IPv6. + - When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP + address belonging to the network/subnet configured for the forwarding rule. + By default, if this field is empty, an ephemeral internal IP address will + be automatically allocated from the IP range of the subnet or network configured + for this forwarding rule. + - 'An address can be specified either by a literal IP address or a URL reference + to an existing Address resource. 
The following examples are all valid: * 100.1.2.3
+ * U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address)
+ * projects/project/regions/region/addresses/address * regions/region/addresses/address
+ * global/addresses/address * address .'
+ returned: success
+ type: str
+ IPProtocol:
+ description:
+ - The IP protocol to which this rule applies. When the load balancing scheme
+ is INTERNAL_SELF_MANAGED, only TCP is valid. This field must not be set if
+ the global address is configured as a purpose of PRIVATE_SERVICE_CONNECT and
+ addressType of INTERNAL .
+ returned: success
+ type: str
+ ipVersion:
+ description:
+ - The IP Version that will be used by this global forwarding rule.
+ returned: success
+ type: str
+ loadBalancingScheme:
+ description:
+ - This signifies what the GlobalForwardingRule will be used for.
+ - 'The value of INTERNAL_SELF_MANAGED means that this will be used for Internal
+ Global HTTP(S) LB. The value of EXTERNAL means that this will be used for
+ External Global Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)
+ ([Beta](U(https://terraform.io/docs/providers/google/guides/provider_versions.html))
+ only) Note: This field must be set to "" if the global address is configured
+ as a purpose of PRIVATE_SERVICE_CONNECT and addressType of INTERNAL.'
+ returned: success
+ type: str
+ metadataFilters:
+ description:
+ - Opaque filter criteria used by Loadbalancer to restrict routing configuration
+ to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer,
+ xDS clients present node metadata. If a match takes place, the relevant routing
+ configuration is made available to those proxies.
+ - For each metadataFilter in this list, if its filterMatchCriteria is set to
+ MATCH_ANY, at least one of the filterLabels must match the corresponding label
+ provided in the metadata.
If its filterMatchCriteria is set to MATCH_ALL, + then all of its filterLabels must match with corresponding labels in the provided + metadata. + - metadataFilters specified here can be overridden by those specified in the + UrlMap that this ForwardingRule references. + - metadataFilters only applies to Loadbalancers that have their loadBalancingScheme + set to INTERNAL_SELF_MANAGED. + returned: success + type: complex + contains: + filterMatchCriteria: + description: + - Specifies how individual filterLabel matches within the list of filterLabels + contribute towards the overall metadataFilter match. + - MATCH_ANY - At least one of the filterLabels must have a matching label + in the provided metadata. + - MATCH_ALL - All filterLabels must have matching labels in the provided + metadata. + returned: success + type: str + filterLabels: + description: + - The list of label value pairs that must match labels in the provided metadata + based on filterMatchCriteria This list must not be empty and can have + at the most 64 entries. + returned: success + type: complex + contains: + name: + description: + - Name of the metadata label. The length must be between 1 and 1024 + characters, inclusive. + returned: success + type: str + value: + description: + - The value that the label must match. The value has a maximum length + of 1024 characters. + returned: success + type: str + name: + description: + - Name of the resource; provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + network: + description: + - This field is not used for external load balancing. 
+ - For INTERNAL_SELF_MANAGED load balancing, this field identifies the network + that the load balanced IP should belong to for this global forwarding rule. + If this field is not specified, the default network will be used. + returned: success + type: dict + portRange: + description: + - This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, + TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. + - Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed + to ports in the specified range will be forwarded to target. + - Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint + port ranges. + - 'Some types of forwarding target have constraints on the acceptable ports: + * TargetHttpProxy: 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, + 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: + 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway: + 500, 4500 .' + returned: success + type: str + target: + description: + - The URL of the target resource to receive the matched traffic. + - The forwarded traffic must be of a type appropriate to the target object. + - For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are + valid. + - ([Beta](U(https://terraform.io/docs/providers/google/guides/provider_versions.html)) + only) For global address with a purpose of PRIVATE_SERVICE_CONNECT and addressType + of INTERNAL, only "all-apis" and "vpc-sc" are valid. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/forwardingRules".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_health_check.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_health_check.py new file mode 100644 index 000000000..e48b07c64 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_health_check.py @@ -0,0 +1,1406 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_health_check +description: +- Health Checks determine whether instances are responsive and able to do work. +- They are an important part of a comprehensive load balancing configuration, as they + enable monitoring instances behind load balancers. +- Health Checks poll instances at a specified interval. Instances that do not respond + successfully to some number of probes in a row are marked as unhealthy. No new connections + are sent to unhealthy instances, though existing connections will continue. The + health check will continue to poll unhealthy instances. If an instance later responds + successfully to some number of consecutive probes, it is marked healthy again and + can receive new connections. +- "~>**NOTE**: Legacy HTTP(S) health checks must be used for target pool-based network + load balancers. See the [official guide](U(https://cloud.google.com/load-balancing/docs/health-check-concepts#selecting_hc)) + for choosing a type of health check." +short_description: Creates a GCP HealthCheck +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + check_interval_sec: + description: + - How often (in seconds) to send a health check. 
The default value is 5 seconds. + required: false + default: '5' + type: int + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + healthy_threshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + required: false + default: '2' + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + timeout_sec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + required: false + default: '5' + type: int + aliases: + - timeout_seconds + unhealthy_threshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. + required: false + default: '2' + type: int + type: + description: + - Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not + specified, the default is TCP. Exactly one of the protocol-specific health check + field must be specified, which must match type field. + - 'Some valid choices include: "TCP", "SSL", "HTTP", "HTTPS", "HTTP2"' + required: false + type: str + http_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + host: + description: + - The value of the host header in the HTTP health check request. 
+ - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + required: false + type: str + request_path: + description: + - The request path of the HTTP health check request. + - The default value is /. + required: false + default: "/" + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, HTTP health check follows behavior specified in `port` + and `portName` fields. + - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + https_health_check: + description: + - A nested object resource. 
+ required: false + type: dict + suboptions: + host: + description: + - The value of the host header in the HTTPS health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + required: false + type: str + request_path: + description: + - The request path of the HTTPS health check request. + - The default value is /. + required: false + default: "/" + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, HTTPS health check follows behavior specified in `port` + and `portName` fields. 
+ - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + tcp_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + request: + description: + - The application data to send once the TCP connection has been established + (default value is empty). If both request and response are empty, the connection + establishment alone will indicate health. The request data can only be ASCII. + required: false + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the TCP health check request. + - The default value is 443. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, TCP health check follows behavior specified in `port` + and `portName` fields. 
+ - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + ssl_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + request: + description: + - The application data to send once the SSL connection has been established + (default value is empty). If both request and response are empty, the connection + establishment alone will indicate health. The request data can only be ASCII. + required: false + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the SSL health check request. + - The default value is 443. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, SSL health check follows behavior specified in `port` + and `portName` fields. 
+ - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + http2_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + host: + description: + - The value of the host header in the HTTP2 health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + required: false + type: str + request_path: + description: + - The request path of the HTTP2 health check request. + - The default value is /. + required: false + default: "/" + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the HTTP2 health check request. + - The default value is 443. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." 
+ - If not specified, HTTP2 health check follows behavior specified in `port` + and `portName` fields. + - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + grpc_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + port: + description: + - The port number for the health check request. + - Must be specified if portName and portSpecification are not set or if port_specification + is USE_FIXED_PORT. Valid values are 1 through 65535. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, gRPC health check follows behavior specified in `port` + and `portName` fields. + - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + grpc_service_name: + description: + - The gRPC service name for the health check. + - 'The value of grpcServiceName has the following meanings by convention: + - Empty serviceName means the overall status of all services at the backend.' + - "- Non-empty serviceName means the health of that gRPC service, as defined + by the owner of the service." + - The grpcServiceName can only be ASCII. + required: false + type: str + log_config: + description: + - Configure logging on this health check. 
+ required: false + type: dict + suboptions: + enable: + description: + - Indicates whether or not to export logs. This is false by default, which + means no health check logging will be done. + required: false + default: 'false' + type: bool + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks)' +- 'Official Documentation: U(https://cloud.google.com/load-balancing/docs/health-checks)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. 
+- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a health check + google.cloud.gcp_compute_health_check: + name: test_object + type: TCP + tcp_health_check: + port_name: service-health + request: ping + response: pong + healthy_threshold: 10 + timeout_sec: 2 + unhealthy_threshold: 5 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +checkIntervalSec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + returned: success + type: int +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. 
It is invalid for timeoutSec to have greater value + than checkIntervalSec. + returned: success + type: int +unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. + returned: success + type: int +type: + description: + - Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not + specified, the default is TCP. Exactly one of the protocol-specific health check + field must be specified, which must match type field. + returned: success + type: str +httpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' 
+ - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, HTTP health check follows behavior specified in `port` and + `portName` fields. + returned: success + type: str +httpsHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTPS health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTPS health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." 
+ - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, HTTPS health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str +tcpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the TCP connection has been established + (default value is empty). If both request and response are empty, the connection + establishment alone will indicate health. The request data can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the TCP health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. 
For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, TCP health check follows behavior specified in `port` and + `portName` fields. + returned: success + type: str +sslHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the SSL connection has been established + (default value is empty). If both request and response are empty, the connection + establishment alone will indicate health. The request data can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the SSL health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, SSL health check follows behavior specified in `port` and + `portName` fields. 
+ returned: success + type: str +http2HealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP2 health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP2 health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP2 health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, HTTP2 health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str +grpcHealthCheck: + description: + - A nested object resource. 
+ returned: success + type: complex + contains: + port: + description: + - The port number for the health check request. + - Must be specified if portName and portSpecification are not set or if port_specification + is USE_FIXED_PORT. Valid values are 1 through 65535. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, gRPC health check follows behavior specified in `port` and + `portName` fields. + returned: success + type: str + grpcServiceName: + description: + - The gRPC service name for the health check. + - 'The value of grpcServiceName has the following meanings by convention: - + Empty serviceName means the overall status of all services at the backend.' + - "- Non-empty serviceName means the health of that gRPC service, as defined + by the owner of the service." + - The grpcServiceName can only be ASCII. + returned: success + type: str +logConfig: + description: + - Configure logging on this health check. + returned: success + type: complex + contains: + enable: + description: + - Indicates whether or not to export logs. This is false by default, which means + no health check logging will be done. 
  returned: success
  type: bool
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
    navigate_hash,
    GcpSession,
    GcpModule,
    GcpRequest,
    remove_nones_from_dict,
    replace_resource_dict,
)
import json
import time

################################################################################
# Main
################################################################################


def main():
    """Module entry point: converge the GCP health check to the requested state.

    Fetches the current resource, then creates, updates, or deletes it so that
    it matches the playbook parameters, and exits with the resulting resource
    plus a 'changed' flag.
    """

    # Argument spec mirrors the compute#healthCheck API resource; one nested
    # dict per protocol-specific health check type.
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            check_interval_sec=dict(default=5, type='int'),
            description=dict(type='str'),
            healthy_threshold=dict(default=2, type='int'),
            name=dict(required=True, type='str'),
            timeout_sec=dict(default=5, type='int', aliases=['timeout_seconds']),
            unhealthy_threshold=dict(default=2, type='int'),
            type=dict(type='str'),
            http_health_check=dict(
                type='dict',
                options=dict(
                    host=dict(type='str'),
                    request_path=dict(default='/', type='str'),
                    response=dict(type='str'),
                    port=dict(type='int'),
                    port_name=dict(type='str'),
                    proxy_header=dict(default='NONE', type='str'),
                    port_specification=dict(type='str'),
                ),
            ),
            https_health_check=dict(
                type='dict',
                options=dict(
                    host=dict(type='str'),
                    request_path=dict(default='/', type='str'),
                    response=dict(type='str'),
                    port=dict(type='int'),
                    port_name=dict(type='str'),
                    proxy_header=dict(default='NONE', type='str'),
                    port_specification=dict(type='str'),
                ),
            ),
            tcp_health_check=dict(
                type='dict',
                options=dict(
                    request=dict(type='str'),
                    response=dict(type='str'),
                    port=dict(type='int'),
                    port_name=dict(type='str'),
                    proxy_header=dict(default='NONE', type='str'),
                    port_specification=dict(type='str'),
                ),
            ),
            ssl_health_check=dict(
                type='dict',
                options=dict(
                    request=dict(type='str'),
                    response=dict(type='str'),
                    port=dict(type='int'),
                    port_name=dict(type='str'),
                    proxy_header=dict(default='NONE', type='str'),
                    port_specification=dict(type='str'),
                ),
            ),
            http2_health_check=dict(
                type='dict',
                options=dict(
                    host=dict(type='str'),
                    request_path=dict(default='/', type='str'),
                    response=dict(type='str'),
                    port=dict(type='int'),
                    port_name=dict(type='str'),
                    proxy_header=dict(default='NONE', type='str'),
                    port_specification=dict(type='str'),
                ),
            ),
            grpc_health_check=dict(
                type='dict',
                options=dict(port=dict(type='int'), port_name=dict(type='str'), port_specification=dict(type='str'), grpc_service_name=dict(type='str')),
            ),
            log_config=dict(type='dict', options=dict(enable=dict(type='bool'))),
        )
    )

    # Default OAuth scope when the user supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#healthCheck'

    # None when the resource does not exist (404 is allowed by default).
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            # Resource exists: update only if the desired request differs
            # from the live resource.
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            # Absent and already gone: nothing to do.
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link, kind):
    """POST the resource to the collection URL and wait for the operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


def update(module, link, kind):
    """PUT the full resource to its self link and wait for the operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.put(link, resource_to_request(module)))


def delete(module, link, kind):
    """DELETE the resource at its self link and wait for the operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))


def resource_to_request(module):
    """Build the API request body (camelCase keys) from module parameters."""
    request = {
        u'kind': 'compute#healthCheck',
        u'checkIntervalSec': module.params.get('check_interval_sec'),
        u'description': module.params.get('description'),
        u'healthyThreshold': module.params.get('healthy_threshold'),
        u'name': module.params.get('name'),
        u'timeoutSec': module.params.get('timeout_sec'),
        u'unhealthyThreshold': module.params.get('unhealthy_threshold'),
        u'type': module.params.get('type'),
        u'httpHealthCheck': HealthCheckHttphealthcheck(module.params.get('http_health_check', {}), module).to_request(),
        u'httpsHealthCheck': HealthCheckHttpshealthcheck(module.params.get('https_health_check', {}), module).to_request(),
        u'tcpHealthCheck': HealthCheckTcphealthcheck(module.params.get('tcp_health_check', {}), module).to_request(),
        u'sslHealthCheck': HealthCheckSslhealthcheck(module.params.get('ssl_health_check', {}), module).to_request(),
        u'http2HealthCheck': HealthCheckHttp2healthcheck(module.params.get('http2_health_check', {}), module).to_request(),
        u'grpcHealthCheck': HealthCheckGrpchealthcheck(module.params.get('grpc_health_check', {}), module).to_request(),
        u'logConfig': HealthCheckLogconfig(module.params.get('log_config', {}), module).to_request(),
    }
    # Drop unset values but keep explicit False (a meaningful boolean).
    # NOTE(review): this also drops 0 and '' — intentional in the generated
    # modules, since the API treats them as unset here.
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource; return its JSON, or None if 404 is allowed."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    """URL of this specific health check (global scope)."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/healthChecks/{name}".format(**module.params)


def collection(module):
    """URL of the project's healthChecks collection (for create)."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/healthChecks".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response, failing the module on HTTP or API errors.

    Returns None for an allowed 404 or an empty 204 response.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    # Surface structured API errors embedded in a 200-level body.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True when the desired request differs from the live resource.

    Only keys present on both sides are compared, so output-only fields and
    unset parameters never cause a spurious diff.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Normalize an API response to the request shape for comparison.

    'name' is taken from the playbook parameters rather than the response,
    since it is the identity key the user controls.
    """
    return {
        u'checkIntervalSec': response.get(u'checkIntervalSec'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'description': response.get(u'description'),
        u'healthyThreshold': response.get(u'healthyThreshold'),
        u'id': response.get(u'id'),
        u'name': module.params.get('name'),
        u'timeoutSec': response.get(u'timeoutSec'),
        u'unhealthyThreshold': response.get(u'unhealthyThreshold'),
        u'type': response.get(u'type'),
        u'httpHealthCheck': HealthCheckHttphealthcheck(response.get(u'httpHealthCheck', {}), module).from_response(),
        u'httpsHealthCheck': HealthCheckHttpshealthcheck(response.get(u'httpsHealthCheck', {}), module).from_response(),
        u'tcpHealthCheck': HealthCheckTcphealthcheck(response.get(u'tcpHealthCheck', {}), module).from_response(),
        u'sslHealthCheck': HealthCheckSslhealthcheck(response.get(u'sslHealthCheck', {}), module).from_response(),
        u'http2HealthCheck': HealthCheckHttp2healthcheck(response.get(u'http2HealthCheck', {}), module).from_response(),
        u'grpcHealthCheck': HealthCheckGrpchealthcheck(response.get(u'grpcHealthCheck', {}), module).from_response(),
        u'logConfig': HealthCheckLogconfig(response.get(u'logConfig', {}), module).from_response(),
    }


def async_op_url(module, extra_data=None):
    """URL of a global compute operation; 'op_id' comes from extra_data."""
    if extra_data is None:
        extra_data = {}
    url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    """Block until the compute operation finishes, then fetch the resource."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    # targetLink points at the health check the operation acted on.
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#healthCheck')


def wait_for_completion(status, op_result, module):
    """Poll the operation every second until its status is DONE.

    Fails the module as soon as the operation reports errors.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module if the given error path exists in the response."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


class HealthCheckHttphealthcheck(object):
    """Maps the http_health_check sub-dict to/from API camelCase form."""

    def __init__(self, request, module):
        self.module = module
        # Treat a missing/None sub-dict as empty so .get() calls are safe.
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        """snake_case params -> camelCase API fields, Nones removed."""
        return remove_nones_from_dict(
            {
                u'host': self.request.get('host'),
                u'requestPath': self.request.get('request_path'),
                u'response': self.request.get('response'),
                u'port': self.request.get('port'),
                u'portName': self.request.get('port_name'),
                u'proxyHeader': self.request.get('proxy_header'),
                u'portSpecification': self.request.get('port_specification'),
            }
        )

    def from_response(self):
        """Pass through camelCase API fields, Nones removed."""
        return remove_nones_from_dict(
            {
                u'host': self.request.get(u'host'),
                u'requestPath': self.request.get(u'requestPath'),
                u'response': self.request.get(u'response'),
                u'port': self.request.get(u'port'),
                u'portName': self.request.get(u'portName'),
                u'proxyHeader': self.request.get(u'proxyHeader'),
                u'portSpecification': self.request.get(u'portSpecification'),
            }
        )


class HealthCheckHttpshealthcheck(object):
    """Maps the https_health_check sub-dict to/from API camelCase form."""

    def __init__(self, request, module):
        self.module = module
        # Treat a missing/None sub-dict as empty so .get() calls are safe.
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        """snake_case params -> camelCase API fields, Nones removed."""
        return remove_nones_from_dict(
            {
                u'host': self.request.get('host'),
                u'requestPath': self.request.get('request_path'),
                u'response': self.request.get('response'),
                u'port': self.request.get('port'),
                u'portName': self.request.get('port_name'),
                u'proxyHeader': self.request.get('proxy_header'),
                u'portSpecification':
self.request.get('port_specification'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'host': self.request.get(u'host'), + u'requestPath': self.request.get(u'requestPath'), + u'response': self.request.get(u'response'), + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'proxyHeader': self.request.get(u'proxyHeader'), + u'portSpecification': self.request.get(u'portSpecification'), + } + ) + + +class HealthCheckTcphealthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'request': self.request.get('request'), + u'response': self.request.get('response'), + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'proxyHeader': self.request.get('proxy_header'), + u'portSpecification': self.request.get('port_specification'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'request': self.request.get(u'request'), + u'response': self.request.get(u'response'), + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'proxyHeader': self.request.get(u'proxyHeader'), + u'portSpecification': self.request.get(u'portSpecification'), + } + ) + + +class HealthCheckSslhealthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'request': self.request.get('request'), + u'response': self.request.get('response'), + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'proxyHeader': self.request.get('proxy_header'), + u'portSpecification': self.request.get('port_specification'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'request': self.request.get(u'request'), + 
u'response': self.request.get(u'response'), + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'proxyHeader': self.request.get(u'proxyHeader'), + u'portSpecification': self.request.get(u'portSpecification'), + } + ) + + +class HealthCheckHttp2healthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'host': self.request.get('host'), + u'requestPath': self.request.get('request_path'), + u'response': self.request.get('response'), + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'proxyHeader': self.request.get('proxy_header'), + u'portSpecification': self.request.get('port_specification'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'host': self.request.get(u'host'), + u'requestPath': self.request.get(u'requestPath'), + u'response': self.request.get(u'response'), + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'proxyHeader': self.request.get(u'proxyHeader'), + u'portSpecification': self.request.get(u'portSpecification'), + } + ) + + +class HealthCheckGrpchealthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'portSpecification': self.request.get('port_specification'), + u'grpcServiceName': self.request.get('grpc_service_name'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'portSpecification': self.request.get(u'portSpecification'), + u'grpcServiceName': self.request.get(u'grpcServiceName'), + } + ) + + +class 
HealthCheckLogconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'enable': self.request.get('enable')}) + + def from_response(self): + return remove_nones_from_dict({u'enable': self.request.get(u'enable')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_health_check_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_health_check_info.py new file mode 100644 index 000000000..3f323c7f5 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_health_check_info.py @@ -0,0 +1,581 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_health_check_info
+description:
+- Gather info for GCP HealthCheck
+short_description: Gather info for GCP HealthCheck
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+    elements: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a health check + gcp_compute_health_check_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + checkIntervalSec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. 
+ returned: success + type: int + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + returned: success + type: int + unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. + returned: success + type: int + type: + description: + - Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If + not specified, the default is TCP. Exactly one of the protocol-specific health + check field must be specified, which must match type field. + returned: success + type: str + httpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. 
If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, HTTP health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + httpsHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTPS health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTPS health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. 
+ returned: success + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, HTTPS health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + tcpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the TCP connection has been established + (default value is empty). If both request and response are empty, the + connection establishment alone will indicate health. The request data + can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the TCP health check request. + - The default value is 443. 
+ returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, TCP health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + sslHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the SSL connection has been established + (default value is empty). If both request and response are empty, the + connection establishment alone will indicate health. The request data + can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the SSL health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. 
+ returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, SSL health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + http2HealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP2 health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP2 health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP2 health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. 
+ returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, HTTP2 health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + grpcHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + port: + description: + - The port number for the health check request. + - Must be specified if portName and portSpecification are not set or if + port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." 
+ - If not specified, gRPC health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + grpcServiceName: + description: + - The gRPC service name for the health check. + - 'The value of grpcServiceName has the following meanings by convention: + - Empty serviceName means the overall status of all services at the backend.' + - "- Non-empty serviceName means the health of that gRPC service, as defined + by the owner of the service." + - The grpcServiceName can only be ASCII. + returned: success + type: str + logConfig: + description: + - Configure logging on this health check. + returned: success + type: complex + contains: + enable: + description: + - Indicates whether or not to export logs. This is false by default, which + means no health check logging will be done. + returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/healthChecks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, 
array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_http_health_check.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_http_health_check.py new file mode 100644 index 000000000..c5da8434f --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_http_health_check.py @@ -0,0 +1,458 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_http_health_check +description: +- An HttpHealthCheck resource. This resource defines a template for how individual + VMs should be checked for health, via HTTP. +short_description: Creates a GCP HttpHealthCheck +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + check_interval_sec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + required: false + default: '5' + type: int + aliases: + - check_interval_seconds + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + healthy_threshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + required: false + type: int + host: + description: + - The value of the host header in the HTTP health check request. If left empty + (default value), the public IP on behalf of which this health check is performed + will be used. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. 
The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + required: false + type: int + request_path: + description: + - The request path of the HTTP health check request. + - The default value is /. + required: false + type: str + timeout_sec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + required: false + type: int + aliases: + - timeout_seconds + unhealthy_threshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. + required: false + type: int + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/httpHealthChecks)' +- 'Adding Health Checks: U(https://cloud.google.com/compute/docs/load-balancing/health-checks#legacy_health_checks)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a HTTP health check + google.cloud.gcp_compute_http_health_check: + name: test_object + healthy_threshold: 10 + port: 8080 + timeout_sec: 2 + unhealthy_threshold: 5 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +checkIntervalSec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. 
+ returned: success + type: str +healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + returned: success + type: int +host: + description: + - The value of the host header in the HTTP health check request. If left empty (default + value), the public IP on behalf of which this health check is performed will be + used. + returned: success + type: str +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + returned: success + type: int +requestPath: + description: + - The request path of the HTTP health check request. + - The default value is /. + returned: success + type: str +timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater value + than checkIntervalSec. + returned: success + type: int +unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. 
+ returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + check_interval_sec=dict(default=5, type='int', aliases=['check_interval_seconds']), + description=dict(type='str'), + healthy_threshold=dict(type='int'), + host=dict(type='str'), + name=dict(required=True, type='str'), + port=dict(type='int'), + request_path=dict(type='str'), + timeout_sec=dict(type='int', aliases=['timeout_seconds']), + unhealthy_threshold=dict(type='int'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#httpHealthCheck' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def 
update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#httpHealthCheck', + u'checkIntervalSec': module.params.get('check_interval_sec'), + u'description': module.params.get('description'), + u'healthyThreshold': module.params.get('healthy_threshold'), + u'host': module.params.get('host'), + u'name': module.params.get('name'), + u'port': module.params.get('port'), + u'requestPath': module.params.get('request_path'), + u'timeoutSec': module.params.get('timeout_sec'), + u'unhealthyThreshold': module.params.get('unhealthy_threshold'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/httpHealthChecks/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/httpHealthChecks".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'checkIntervalSec': response.get(u'checkIntervalSec'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'healthyThreshold': response.get(u'healthyThreshold'), + u'host': response.get(u'host'), + u'id': response.get(u'id'), + u'name': module.params.get('name'), + u'port': response.get(u'port'), + u'requestPath': response.get(u'requestPath'), + u'timeoutSec': response.get(u'timeoutSec'), + u'unhealthyThreshold': response.get(u'unhealthyThreshold'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, 
['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#httpHealthCheck') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_http_health_check_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_http_health_check_info.py new file mode 100644 index 000000000..2c77d1282 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_http_health_check_info.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_http_health_check_info
+description:
+- Gather info for GCP HttpHealthCheck
+short_description: Gather info for GCP HttpHealthCheck
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+    elements: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used
+    type: list
+    elements: str
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on a HTTP health check
+  gcp_compute_http_health_check_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    checkIntervalSec:
+      description:
+      - How often (in seconds) to send a health check. The default value is 5 seconds.
+      returned: success
+      type: int
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    description:
+      description:
+      - An optional description of this resource. Provide this property when you create
+        the resource.
+      returned: success
+      type: str
+    healthyThreshold:
+      description:
+      - A so-far unhealthy instance will be marked healthy after this many consecutive
+        successes. The default value is 2.
+ returned: success + type: int + host: + description: + - The value of the host header in the HTTP health check request. If left empty + (default value), the public IP on behalf of which this health check is performed + will be used. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + returned: success + type: int + requestPath: + description: + - The request path of the HTTP health check request. + - The default value is /. + returned: success + type: str + timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + returned: success + type: int + unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. 
+ returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/httpHealthChecks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_https_health_check.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_https_health_check.py new file mode 100644 index 000000000..8a60ce15d --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_https_health_check.py @@ -0,0 +1,455 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_https_health_check +description: +- An HttpsHealthCheck resource. 
This resource defines a template for how individual + VMs should be checked for health, via HTTPS. +short_description: Creates a GCP HttpsHealthCheck +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + check_interval_sec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + required: false + type: int + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + healthy_threshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + required: false + type: int + host: + description: + - The value of the host header in the HTTPS health check request. If left empty + (default value), the public IP on behalf of which this health check is performed + will be used. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + required: false + type: int + request_path: + description: + - The request path of the HTTPS health check request. + - The default value is /. 
+ required: false + type: str + timeout_sec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + required: false + type: int + aliases: + - timeout_seconds + unhealthy_threshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. + required: false + type: int + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/httpsHealthChecks)' +- 'Adding Health Checks: U(https://cloud.google.com/compute/docs/load-balancing/health-checks#legacy_health_checks)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. 
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a HTTPS health check + google.cloud.gcp_compute_https_health_check: + name: test_object + healthy_threshold: 10 + port: 8080 + timeout_sec: 2 + unhealthy_threshold: 5 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +checkIntervalSec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + returned: success + type: int +host: + description: + - The value of the host header in the HTTPS health check request. If left empty + (default value), the public IP on behalf of which this health check is performed + will be used. + returned: success + type: str +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + returned: success + type: int +requestPath: + description: + - The request path of the HTTPS health check request. + - The default value is /. + returned: success + type: str +timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater value + than checkIntervalSec. + returned: success + type: int +unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. 
+ returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + check_interval_sec=dict(type='int'), + description=dict(type='str'), + healthy_threshold=dict(type='int'), + host=dict(type='str'), + name=dict(required=True, type='str'), + port=dict(type='int'), + request_path=dict(type='str'), + timeout_sec=dict(type='int', aliases=['timeout_seconds']), + unhealthy_threshold=dict(type='int'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#httpsHealthCheck' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = 
GcpSession(module, 'compute') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#httpsHealthCheck', + u'checkIntervalSec': module.params.get('check_interval_sec'), + u'description': module.params.get('description'), + u'healthyThreshold': module.params.get('healthy_threshold'), + u'host': module.params.get('host'), + u'name': module.params.get('name'), + u'port': module.params.get('port'), + u'requestPath': module.params.get('request_path'), + u'timeoutSec': module.params.get('timeout_sec'), + u'unhealthyThreshold': module.params.get('unhealthy_threshold'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/httpsHealthChecks/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/httpsHealthChecks".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'checkIntervalSec': response.get(u'checkIntervalSec'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'healthyThreshold': response.get(u'healthyThreshold'), + u'host': response.get(u'host'), + u'id': response.get(u'id'), + u'name': module.params.get('name'), + u'port': response.get(u'port'), + u'requestPath': response.get(u'requestPath'), + u'timeoutSec': response.get(u'timeoutSec'), + u'unhealthyThreshold': response.get(u'unhealthyThreshold'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, 
['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#httpsHealthCheck') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_https_health_check_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_https_health_check_info.py new file mode 100644 index 000000000..0ff65140d --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_https_health_check_info.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_https_health_check_info
+description:
+- Gather info for GCP HttpsHealthCheck
+short_description: Gather info for GCP HttpsHealthCheck
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+    elements: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a HTTPS health check + gcp_compute_https_health_check_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + checkIntervalSec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. 
+ returned: success + type: int + host: + description: + - The value of the host header in the HTTPS health check request. If left empty + (default value), the public IP on behalf of which this health check is performed + will be used. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + returned: success + type: int + requestPath: + description: + - The request path of the HTTPS health check request. + - The default value is /. + returned: success + type: str + timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + returned: success + type: int + unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. 
def main():
    """Entry point: list HttpsHealthCheck resources matching the filters."""
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))

    # Default to the compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    module.exit_json(resources=fetch_list(module, collection(module), query_options(module.params['filters'])))


def collection(module):
    """Return the list URL for httpsHealthChecks in the configured project."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/httpsHealthChecks".format(**module.params)


def fetch_list(module, link, query):
    """Fetch every resource at *link*, applying *query* as a server-side filter."""
    session = GcpSession(module, 'compute')
    return session.list(link, return_if_object, array_name='items', params={'filter': query})


def query_options(filters):
    """Combine a list of filter expressions into a single query string.

    A single filter is passed through unchanged; multiple filters are each
    parenthesised (unless already bracketed at either end) and joined with
    spaces, which the API treats as an AND condition.
    """
    if not filters:
        return ''
    if len(filters) == 1:
        return filters[0]
    # Parenthesise any expression that is not already wrapped at either end.
    wrapped = [f if f[0] == '(' or f[-1] == ')' else "(%s)" % f for f in filters]
    return ' '.join(wrapped)


def return_if_object(module, response):
    """Decode *response* JSON, returning None for 404/204 and failing on API errors."""
    # If not found, return nothing.
    if response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


if __name__ == "__main__":
    main()
+- Google Compute Engine uses operating system images to create the root persistent + disks for your instances. You specify an image when you create an instance. Images + contain a boot loader, an operating system, and a root file system. Linux operating + system images are also capable of running containers on Compute Engine. +- Images can be either public or custom. +- Public images are provided and maintained by Google, open-source communities, and + third-party vendors. By default, all projects have access to these images and can + use them to create instances. Custom images are available only to your project. + You can create a custom image from root persistent disks and other images. Then, + use the custom image to create an instance. +short_description: Creates a GCP Image +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + disk_size_gb: + description: + - Size of the image when restored onto a persistent disk (in GB). + required: false + type: int + family: + description: + - The name of the image family to which this image belongs. You can create disks + by specifying an image family instead of a specific image name. The image family + always returns its latest image that is not deprecated. The name of the image + family must comply with RFC1035. + required: false + type: str + guest_os_features: + description: + - A list of features to enable on the guest operating system. + - Applicable only for bootable images. + elements: dict + required: false + type: list + suboptions: + type: + description: + - The type of supported feature. 
+ - 'Some valid choices include: "MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", + "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC"' + required: true + type: str + image_encryption_key: + description: + - Encrypts the image using a customer-supplied encryption key. + - After you encrypt an image with a customer-supplied key, you must provide the + same key if you use the image later (e.g. to create a disk from the image) . + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + required: false + type: str + labels: + description: + - Labels to apply to this Image. + required: false + type: dict + licenses: + description: + - Any applicable license URI. + elements: str + required: false + type: list + name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + raw_disk: + description: + - The parameters of the raw disk image. + required: false + type: dict + suboptions: + container_type: + description: + - The format used to encode and transmit the block device, which should be + TAR. This is just a container and transmission format and not a runtime + format. Provided by the client when the disk image is created. + - 'Some valid choices include: "TAR"' + required: false + type: str + sha1_checksum: + description: + - An optional SHA1 checksum of the disk image before unpackaging. + - This is provided by the client when the disk image is created. 
+ required: false + type: str + source: + description: + - The full Google Cloud Storage URL where disk storage is stored You must + provide either this property or the sourceDisk property but not both. + required: true + type: str + source_disk: + description: + - The source disk to create this image based on. + - You must provide either this property or the rawDisk.source property but not + both to create an image. + - 'This field represents a link to a Disk resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_disk task and then set this source_disk field to "{{ name-of-resource + }}"' + required: false + type: dict + source_disk_encryption_key: + description: + - The customer-supplied encryption key of the source disk. Required if the source + disk is protected by a customer-supplied encryption key. + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + required: false + type: str + source_disk_id: + description: + - The ID value of the disk used to create this image. This value may be used to + determine whether the image was taken from the current or a previous instance + of a given disk name. + required: false + type: str + source_image: + description: + - 'URL of the source image used to create this image. In order to create an image, + you must provide the full or partial URL of one of the following: * The selfLink + URL * This property * The rawDisk.source URL * The sourceDisk URL .' + - 'This field represents a link to a Image resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_image task and then set this source_image field to "{{ name-of-resource + }}"' + required: false + type: dict + source_snapshot: + description: + - URL of the source snapshot used to create this image. + - 'In order to create an image, you must provide the full or partial URL of one + of the following: * The selfLink URL * This property * The sourceImage URL * + The rawDisk.source URL * The sourceDisk URL .' + - 'This field represents a link to a Snapshot resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_snapshot task and then set this source_snapshot field to "{{ + name-of-resource }}"' + required: false + type: dict + source_type: + description: + - The type of the image used to create this disk. The default and only value is + RAW . + - 'Some valid choices include: "RAW"' + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/images)' +- 'Official Documentation: U(https://cloud.google.com/compute/docs/images)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a disk + google.cloud.gcp_compute_disk: + name: disk-image + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: disk + +- name: create a image + google.cloud.gcp_compute_image: + name: test_object + source_disk: "{{ disk }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +archiveSizeBytes: + description: + - Size of the image tar.gz archive stored in Google Cloud Storage (in bytes). + returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. 
+ returned: success + type: str +deprecated: + description: + - The deprecation status associated with this image. + returned: success + type: complex + contains: + deleted: + description: + - An optional RFC3339 timestamp on or after which the state of this resource + is intended to change to DELETED. This is only informational and the status + will not change unless the client explicitly changes it. + returned: success + type: str + deprecated: + description: + - An optional RFC3339 timestamp on or after which the state of this resource + is intended to change to DEPRECATED. This is only informational and the status + will not change unless the client explicitly changes it. + returned: success + type: str + obsolete: + description: + - An optional RFC3339 timestamp on or after which the state of this resource + is intended to change to OBSOLETE. This is only informational and the status + will not change unless the client explicitly changes it. + returned: success + type: str + replacement: + description: + - The URL of the suggested replacement for a deprecated resource. + - The suggested replacement resource must be the same kind of resource as the + deprecated resource. + returned: success + type: str + state: + description: + - The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, + or DELETED. Operations which create a new resource using a DEPRECATED resource + will return successfully, but with a warning indicating the deprecated resource + and recommending its replacement. Operations which use OBSOLETE or DELETED + resources will be rejected and result in an error. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +diskSizeGb: + description: + - Size of the image when restored onto a persistent disk (in GB). 
+ returned: success + type: int +family: + description: + - The name of the image family to which this image belongs. You can create disks + by specifying an image family instead of a specific image name. The image family + always returns its latest image that is not deprecated. The name of the image + family must comply with RFC1035. + returned: success + type: str +guestOsFeatures: + description: + - A list of features to enable on the guest operating system. + - Applicable only for bootable images. + returned: success + type: complex + contains: + type: + description: + - The type of supported feature. + returned: success + type: str +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: int +imageEncryptionKey: + description: + - Encrypts the image using a customer-supplied encryption key. + - After you encrypt an image with a customer-supplied key, you must provide the + same key if you use the image later (e.g. to create a disk from the image) . + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str +labels: + description: + - Labels to apply to this Image. + returned: success + type: dict +labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str +licenses: + description: + - Any applicable license URI. + returned: success + type: list +name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +rawDisk: + description: + - The parameters of the raw disk image. + returned: success + type: complex + contains: + containerType: + description: + - The format used to encode and transmit the block device, which should be TAR. + This is just a container and transmission format and not a runtime format. + Provided by the client when the disk image is created. + returned: success + type: str + sha1Checksum: + description: + - An optional SHA1 checksum of the disk image before unpackaging. + - This is provided by the client when the disk image is created. + returned: success + type: str + source: + description: + - The full Google Cloud Storage URL where disk storage is stored You must provide + either this property or the sourceDisk property but not both. + returned: success + type: str +sourceDisk: + description: + - The source disk to create this image based on. + - You must provide either this property or the rawDisk.source property but not both + to create an image. + returned: success + type: dict +sourceDiskEncryptionKey: + description: + - The customer-supplied encryption key of the source disk. Required if the source + disk is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. 
+ returned: success + type: str +sourceDiskId: + description: + - The ID value of the disk used to create this image. This value may be used to + determine whether the image was taken from the current or a previous instance + of a given disk name. + returned: success + type: str +sourceImage: + description: + - 'URL of the source image used to create this image. In order to create an image, + you must provide the full or partial URL of one of the following: * The selfLink + URL * This property * The rawDisk.source URL * The sourceDisk URL .' + returned: success + type: dict +sourceSnapshot: + description: + - URL of the source snapshot used to create this image. + - 'In order to create an image, you must provide the full or partial URL of one + of the following: * The selfLink URL * This property * The sourceImage URL * The + rawDisk.source URL * The sourceDisk URL .' + returned: success + type: dict +sourceType: + description: + - The type of the image used to create this disk. The default and only value is + RAW . 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + disk_size_gb=dict(type='int'), + family=dict(type='str'), + guest_os_features=dict(type='list', elements='dict', options=dict(type=dict(required=True, type='str'))), + image_encryption_key=dict(type='dict', no_log=True, options=dict(raw_key=dict(type='str'))), + labels=dict(type='dict'), + licenses=dict(type='list', elements='str'), + name=dict(required=True, type='str'), + raw_disk=dict(type='dict', options=dict(container_type=dict(type='str'), sha1_checksum=dict(type='str'), source=dict(required=True, type='str'))), + source_disk=dict(type='dict'), + source_disk_encryption_key=dict(type='dict', no_log=True, options=dict(raw_key=dict(type='str'))), + source_disk_id=dict(type='str'), + source_image=dict(type='dict'), + source_snapshot=dict(type='dict'), + source_type=dict(type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#image' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = 
def create(module, link, kind):
    """POST a new image resource and block until the operation finishes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.post(link, resource_to_request(module)))


def update(module, link, kind, fetch):
    """Apply in-place field updates, then re-read the resource."""
    update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
    return fetch_resource(module, self_link(module), kind)


def update_fields(module, request, response):
    """Dispatch per-field update calls for fields that differ.

    Only ``labels`` is updated in place here.
    """
    if request.get('labels') != response.get('labels'):
        labels_update(module, request, response)


def labels_update(module, request, response):
    """Call the setLabels API with the desired labels and current fingerprint."""
    session = GcpSession(module, 'compute')
    endpoint = "https://compute.googleapis.com/compute/v1/projects/{project}/global/images/{name}/setLabels".format(**module.params)
    body = {u'labels': module.params.get('labels'), u'labelFingerprint': response.get('labelFingerprint')}
    session.post(endpoint, body)


def delete(module, link, kind):
    """DELETE the image resource and block until the operation finishes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.delete(link))


def resource_to_request(module):
    """Translate Ansible module params into the API request body for an image.

    Nested objects are normalised through their serialiser classes; falsy
    values (other than an explicit False) are stripped so that the request
    only carries fields the user actually set.
    """
    request = {
        u'kind': 'compute#image',
        u'description': module.params.get('description'),
        u'diskSizeGb': module.params.get('disk_size_gb'),
        u'family': module.params.get('family'),
        u'guestOsFeatures': ImageGuestosfeaturesArray(module.params.get('guest_os_features', []), module).to_request(),
        u'imageEncryptionKey': ImageImageencryptionkey(module.params.get('image_encryption_key', {}), module).to_request(),
        u'labels': module.params.get('labels'),
        u'licenses': module.params.get('licenses'),
        u'name': module.params.get('name'),
        u'rawDisk': ImageRawdisk(module.params.get('raw_disk', {}), module).to_request(),
        u'sourceDisk': replace_resource_dict(module.params.get(u'source_disk', {}), 'selfLink'),
        u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(module.params.get('source_disk_encryption_key', {}), module).to_request(),
        u'sourceDiskId': module.params.get('source_disk_id'),
        u'sourceImage': replace_resource_dict(module.params.get(u'source_image', {}), 'selfLink'),
        u'sourceSnapshot': replace_resource_dict(module.params.get(u'source_snapshot', {}), 'selfLink'),
        u'sourceType': module.params.get('source_type'),
    }
    # Drop unset values, but keep explicit booleans such as False.
    return {key: value for key, value in request.items() if value or value is False}


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET *link* and decode it, optionally tolerating a 404."""
    session = GcpSession(module, 'compute')
    return return_if_object(module, session.get(link), kind, allow_not_found)


def self_link(module):
    """Canonical URL of this image resource."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/images/{name}".format(**module.params)


def collection(module):
    """URL of the images collection for the configured project."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/images".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode *response* JSON; return None for an allowed 404 or a 204."""
    if response.status_code == 404 and allow_not_found:
        return None

    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True when the desired state differs from the live resource.

    Comparison is restricted to keys present in both the request and the
    response, so output-only fields never trigger an update.
    """
    request = resource_to_request(module)
    current = response_to_hash(module, response)

    shared_response = {k: v for k, v in current.items() if k in request}
    shared_request = {k: v for k, v in request.items() if k in current}

    return GcpRequest(shared_request) != GcpRequest(shared_response)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Project the API *response* down to the fields this module compares."""
    return {
        u'archiveSizeBytes': response.get(u'archiveSizeBytes'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'deprecated': ImageDeprecated(response.get(u'deprecated', {}), module).from_response(),
        u'description': response.get(u'description'),
        u'diskSizeGb': response.get(u'diskSizeGb'),
        u'family': response.get(u'family'),
        u'guestOsFeatures': ImageGuestosfeaturesArray(response.get(u'guestOsFeatures', []), module).from_response(),
        u'id': response.get(u'id'),
        u'imageEncryptionKey': ImageImageencryptionkey(response.get(u'imageEncryptionKey', {}), module).from_response(),
        u'labels': response.get(u'labels'),
        u'labelFingerprint': response.get(u'labelFingerprint'),
        u'licenses': response.get(u'licenses'),
        u'name': response.get(u'name'),
        u'rawDisk': ImageRawdisk(response.get(u'rawDisk', {}), module).from_response(),
        u'sourceDisk': response.get(u'sourceDisk'),
        u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(response.get(u'sourceDiskEncryptionKey', {}), module).from_response(),
        u'sourceDiskId': response.get(u'sourceDiskId'),
        u'sourceImage': response.get(u'sourceImage'),
        u'sourceSnapshot': response.get(u'sourceSnapshot'),
        u'sourceType': response.get(u'sourceType'),
    }


def license_selflink(name, params):
    """Expand a bare license name into its project-scoped selfLink URL.

    Values that already look like a full license URL (and None) are returned
    untouched. NOTE(review): the generated URL contains a double slash after
    the API version segment — preserved exactly as the code generator emits it.
    """
    if name is None:
        return None
    pattern = r"https://compute.googleapis.com/compute/v1//projects/.*/global/licenses/.*"
    if re.match(pattern, name):
        return name
    template = "https://compute.googleapis.com/compute/v1//projects/{project}/global/licenses/%s".format(**params)
    return template % name
def async_op_url(module, extra_data=None):
    """Build the global-operations polling URL for an operation id."""
    template = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    values = dict(extra_data or {})
    values.update(module.params)
    return template.format(**values)


def wait_for_operation(module, response):
    """Resolve an operation response into the final image resource."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    finished = wait_for_completion(navigate_hash(op_result, ['status']), op_result, module)
    return fetch_resource(module, navigate_hash(finished, ['targetLink']), 'compute#image')


def wait_for_completion(status, op_result, module):
    """Poll a GCP operation until its status reaches DONE.

    Sleeps in one-second intervals between polls, failing the module as soon
    as the operation reports errors, and returns the final operation resource.
    """
    operation_uri = async_op_url(module, {'op_id': navigate_hash(op_result, ['name'])})
    current = op_result
    current_status = status
    while current_status != 'DONE':
        # Abort immediately if the in-flight operation already reports errors.
        raise_if_errors(current, ['error', 'errors'], module)
        time.sleep(1.0)
        current = fetch_resource(module, operation_uri, 'compute#operation', False)
        current_status = navigate_hash(current, ['status'])
    return current


def raise_if_errors(response, err_path, module):
    """Fail the module if *response* carries errors at *err_path*."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


class ImageDeprecated(object):
    """(De)serialiser for the image ``deprecated`` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def _fields(self):
        # Request and response use identical camelCase keys for this object.
        return remove_nones_from_dict(
            {
                u'deleted': self.request.get('deleted'),
                u'deprecated': self.request.get('deprecated'),
                u'obsolete': self.request.get('obsolete'),
                u'replacement': self.request.get('replacement'),
                u'state': self.request.get('state'),
            }
        )

    def to_request(self):
        return self._fields()

    def from_response(self):
        return self._fields()


class ImageGuestosfeaturesArray(object):
    """(De)serialiser for the ``guestOsFeatures`` list of sub-objects."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(item) for item in self.request]

    def from_response(self):
        return [self._response_from_item(item) for item in self.request]

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'type': item.get('type')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'type': item.get(u'type')})


class ImageImageencryptionkey(object):
    """(De)serialiser for the ``imageEncryptionKey`` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Ansible snake_case key -> API camelCase key.
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})

    def from_response(self):
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})


class ImageRawdisk(object):
    """(De)serialiser for the ``rawDisk`` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Ansible snake_case keys -> API camelCase keys.
        data = self.request
        return remove_nones_from_dict(
            {
                u'containerType': data.get('container_type'),
                u'sha1Checksum': data.get('sha1_checksum'),
                u'source': data.get('source'),
            }
        )

    def from_response(self):
        data = self.request
        return remove_nones_from_dict(
            {
                u'containerType': data.get(u'containerType'),
                u'sha1Checksum': data.get(u'sha1Checksum'),
                u'source': data.get(u'source'),
            }
        )


class ImageSourcediskencryptionkey(object):
    """(De)serialiser for the ``sourceDiskEncryptionKey`` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Ansible snake_case key -> API camelCase key.
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})

    def from_response(self):
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})


if __name__ == '__main__':
    main()
a/ansible_collections/google/cloud/plugins/modules/gcp_compute_image_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_image_info.py new file mode 100644 index 000000000..afd396270 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_image_info.py @@ -0,0 +1,405 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_image_info +description: +- Gather info for GCP Image +short_description: Gather info for GCP Image +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . 
+ type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on an image + gcp_compute_image_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + archiveSizeBytes: + description: + - Size of the image tar.gz archive stored in Google Cloud Storage (in bytes). + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + deprecated: + description: + - The deprecation status associated with this image. + returned: success + type: complex + contains: + deleted: + description: + - An optional RFC3339 timestamp on or after which the state of this resource + is intended to change to DELETED. This is only informational and the status + will not change unless the client explicitly changes it. + returned: success + type: str + deprecated: + description: + - An optional RFC3339 timestamp on or after which the state of this resource + is intended to change to DEPRECATED. This is only informational and the + status will not change unless the client explicitly changes it. + returned: success + type: str + obsolete: + description: + - An optional RFC3339 timestamp on or after which the state of this resource + is intended to change to OBSOLETE. This is only informational and the + status will not change unless the client explicitly changes it. + returned: success + type: str + replacement: + description: + - The URL of the suggested replacement for a deprecated resource. + - The suggested replacement resource must be the same kind of resource as + the deprecated resource. + returned: success + type: str + state: + description: + - The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, + or DELETED. 
Operations which create a new resource using a DEPRECATED + resource will return successfully, but with a warning indicating the deprecated + resource and recommending its replacement. Operations which use OBSOLETE + or DELETED resources will be rejected and result in an error. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + diskSizeGb: + description: + - Size of the image when restored onto a persistent disk (in GB). + returned: success + type: int + family: + description: + - The name of the image family to which this image belongs. You can create disks + by specifying an image family instead of a specific image name. The image + family always returns its latest image that is not deprecated. The name of + the image family must comply with RFC1035. + returned: success + type: str + guestOsFeatures: + description: + - A list of features to enable on the guest operating system. + - Applicable only for bootable images. + returned: success + type: complex + contains: + type: + description: + - The type of supported feature. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + imageEncryptionKey: + description: + - Encrypts the image using a customer-supplied encryption key. + - After you encrypt an image with a customer-supplied key, you must provide + the same key if you use the image later (e.g. to create a disk from the image) + . + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. 
+ returned: success + type: str + labels: + description: + - Labels to apply to this Image. + returned: success + type: dict + labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str + licenses: + description: + - Any applicable license URI. + returned: success + type: list + name: + description: + - Name of the resource; provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + rawDisk: + description: + - The parameters of the raw disk image. + returned: success + type: complex + contains: + containerType: + description: + - The format used to encode and transmit the block device, which should + be TAR. This is just a container and transmission format and not a runtime + format. Provided by the client when the disk image is created. + returned: success + type: str + sha1Checksum: + description: + - An optional SHA1 checksum of the disk image before unpackaging. + - This is provided by the client when the disk image is created. + returned: success + type: str + source: + description: + - The full Google Cloud Storage URL where disk storage is stored You must + provide either this property or the sourceDisk property but not both. + returned: success + type: str + sourceDisk: + description: + - The source disk to create this image based on. + - You must provide either this property or the rawDisk.source property but not + both to create an image. 
+ returned: success + type: dict + sourceDiskEncryptionKey: + description: + - The customer-supplied encryption key of the source disk. Required if the source + disk is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + sourceDiskId: + description: + - The ID value of the disk used to create this image. This value may be used + to determine whether the image was taken from the current or a previous instance + of a given disk name. + returned: success + type: str + sourceImage: + description: + - 'URL of the source image used to create this image. In order to create an + image, you must provide the full or partial URL of one of the following: * + The selfLink URL * This property * The rawDisk.source URL * The sourceDisk + URL .' + returned: success + type: dict + sourceSnapshot: + description: + - URL of the source snapshot used to create this image. + - 'In order to create an image, you must provide the full or partial URL of + one of the following: * The selfLink URL * This property * The sourceImage + URL * The rawDisk.source URL * The sourceDisk URL .' + returned: success + type: dict + sourceType: + description: + - The type of the image used to create this disk. The default and only value + is RAW . 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/images".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance.py new file mode 100644 index 000000000..703ec4c2f --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance.py @@ -0,0 +1,1900 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_instance +description: +- An instance is a virtual machine (VM) hosted on Google's infrastructure. 
+short_description: Creates a GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + can_ip_forward: + description: + - Allows this instance to send and receive packets with non-matching destination + or source IPs. This is required if you plan to use this instance to forward + routes. + required: false + type: bool + aliases: + - ip_forward + deletion_protection: + description: + - Whether the resource should be protected against deletion. + required: false + type: bool + disks: + description: + - An array of disks that are associated with the instances that are created from + this template. + elements: dict + required: false + type: list + suboptions: + auto_delete: + description: + - Specifies whether the disk will be auto-deleted when the instance is deleted + (but not when the disk is detached from the instance). + - 'Tip: Disks should be set to autoDelete=true so that leftover disks are + not left behind on machine deletion.' + required: false + type: bool + boot: + description: + - Indicates that this is a boot disk. The virtual machine will use the first + partition of the disk for its root filesystem. + required: false + type: bool + device_name: + description: + - Specifies a unique device name of your choice that is reflected into the + /dev/disk/by-id/google-* tree of a Linux operating system running within + the instance. This name can be used to reference the device for mounting, + resizing, and so on, from within the instance. + required: false + type: str + disk_encryption_key: + description: + - Encrypts or decrypts a disk using a customer-supplied encryption key. 
+ required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC + 4648 base64 to either encrypt or decrypt this resource. + required: false + type: str + rsa_encrypted_key: + description: + - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied + encryption key to either encrypt or decrypt this resource. + required: false + type: str + index: + description: + - Assigns a zero-based index to this disk, where 0 is reserved for the boot + disk. For example, if you have many disks attached to an instance, each + disk would have a unique index number. If not specified, the server will + choose an appropriate value. + required: false + type: int + initialize_params: + description: + - Specifies the parameters for a new disk that will be created alongside the + new instance. Use initialization parameters to create boot disks or local + SSDs attached to the new instance. + required: false + type: dict + suboptions: + disk_name: + description: + - Specifies the disk name. If not specified, the default is to use the + name of the instance. + required: false + type: str + disk_size_gb: + description: + - Specifies the size of the disk in base-2 GB. + required: false + type: int + disk_type: + description: + - Reference to a disk type. + - Specifies the disk type to use to create the instance. + - If not specified, the default is pd-standard. + required: false + type: str + source_image: + description: + - The source image to create this disk. When creating a new instance, + one of initializeParams.sourceImage or disks.source is required. To + create a disk with one of the public operating system images, specify + the image by its family name. + required: false + type: str + aliases: + - image + - image_family + source_image_encryption_key: + description: + - The customer-supplied encryption key of the source image. 
Required if + the source image is protected by a customer-supplied encryption key. + - Instance templates do not store customer-supplied encryption keys, so + you cannot create disks for instances in a managed instance group if + the source images are encrypted with your own keys. + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + required: false + type: str + interface: + description: + - Specifies the disk interface to use for attaching this disk, which is either + SCSI or NVME. The default is SCSI. + - Persistent disks must always use SCSI and the request will fail if you attempt + to attach a persistent disk in any other format than SCSI. + - 'Some valid choices include: "SCSI", "NVME"' + required: false + type: str + mode: + description: + - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If + not specified, the default is to attach the disk in READ_WRITE mode. + - 'Some valid choices include: "READ_WRITE", "READ_ONLY"' + required: false + type: str + source: + description: + - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage + or disks.source is required. + - If desired, you can also attach existing non-root persistent disks using + this property. This field is only applicable for persistent disks. + - 'This field represents a link to a Disk resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and + value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_disk task and then set this source field + to "{{ name-of-resource }}"' + required: false + type: dict + type: + description: + - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, + the default is PERSISTENT. 
+ - 'Some valid choices include: "SCRATCH", "PERSISTENT"' + required: false + type: str + guest_accelerators: + description: + - List of the type and count of accelerator cards attached to the instance . + elements: dict + required: false + type: list + suboptions: + accelerator_count: + description: + - The number of the guest accelerator cards exposed to this instance. + required: false + type: int + accelerator_type: + description: + - Full or partial URL of the accelerator type resource to expose to this instance. + required: false + type: str + hostname: + description: + - The hostname of the instance to be created. The specified hostname must be RFC1035 + compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal + when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal + when using zonal DNS. + required: false + type: str + labels: + description: + - Labels to apply to this instance. A list of key->value pairs. + required: false + type: dict + metadata: + description: + - The metadata key/value pairs to assign to instances that are created from this + template. These pairs can consist of custom metadata or predefined keys. + required: false + type: dict + machine_type: + description: + - A reference to a machine type which defines VM kind. + required: false + type: str + min_cpu_platform: + description: + - Specifies a minimum CPU platform for the VM instance. Applicable values are + the friendly names of CPU platforms . + required: false + type: str + name: + description: + - The name of the resource, provided by the client when initially creating the + resource. The resource name must be 1-63 characters long, and comply with RFC1035. 
+ Specifically, the name must be 1-63 characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase + letter, and all following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: false + type: str + network_interfaces: + description: + - An array of configurations for this interface. This specifies how this interface + is configured to interact with other network services, such as connecting to + the internet. Only one network interface is supported per instance. + elements: dict + required: false + type: list + suboptions: + access_configs: + description: + - An array of configurations for this interface. Currently, only one access + config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, + then this instance will have no external internet access. + elements: dict + required: false + type: list + suboptions: + name: + description: + - The name of this access configuration. The default and recommended name + is External NAT but you can use any arbitrary string you would like. + For example, My external IP or Network Access. + required: true + type: str + nat_ip: + description: + - Reference to an address. + - An external IP address associated with this instance. + - Specify an unused static external IP address available to the project + or leave this field undefined to use an IP from a shared ephemeral IP + address pool. If you specify a static external IP address, it must live + in the same region as the zone of the instance. + - 'This field represents a link to a Address resource in GCP. It can be + specified in two ways. 
First, you can place a dictionary with key ''address'' + and value of your resource''s address Alternatively, you can add `register: + name-of-resource` to a gcp_compute_address task and then set this nat_ip + field to "{{ name-of-resource }}"' + required: false + type: dict + type: + description: + - The type of configuration. The default and only option is ONE_TO_ONE_NAT. + - 'Some valid choices include: "ONE_TO_ONE_NAT"' + required: true + type: str + set_public_ptr: + description: + - Specifies whether a public DNS PTR record should be created to map the + external IP address of the instance to a DNS domain name. + required: false + type: bool + public_ptr_domain_name: + description: + - The DNS domain name for the public PTR record. You can set this field + only if the setPublicPtr field is enabled. + required: false + type: str + network_tier: + description: + - This signifies the networking tier used for configuring this access + configuration. If an AccessConfig is specified without a valid external + IP address, an ephemeral IP will be created with this networkTier. If + an AccessConfig with a valid external IP address is specified, it must + match that of the networkTier associated with the Address resource owning + that IP. + - 'Some valid choices include: "PREMIUM", "STANDARD"' + required: false + type: str + alias_ip_ranges: + description: + - An array of alias IP ranges for this network interface. Can only be specified + for network interfaces on subnet-mode networks. + elements: dict + required: false + type: list + suboptions: + ip_cidr_range: + description: + - The IP CIDR range represented by this alias IP range. + - This IP CIDR range must belong to the specified subnetwork and cannot + contain IP addresses reserved by system or used by other network interfaces. + This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. + /24) or a CIDR format string (e.g. 10.1.2.0/24). 
+ required: false + type: str + subnetwork_range_name: + description: + - Optional subnetwork secondary range name specifying the secondary range + from which to allocate the IP CIDR range for this alias IP range. If + left unspecified, the primary range of the subnetwork will be used. + required: false + type: str + network: + description: + - Specifies the title of an existing network. Not setting the network title + will select the default network interface, which could have SSH already + configured . + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and + value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_network task and then set this network + field to "{{ name-of-resource }}"' + required: false + type: dict + network_ip: + description: + - An IPv4 internal network address to assign to the instance for this network + interface. If not specified by the user, an unused internal IP is assigned + by the system. + required: false + type: str + subnetwork: + description: + - Reference to a VPC network. + - If the network resource is in legacy mode, do not provide this property. + If the network is in auto subnet mode, providing the subnetwork is optional. + If the network is in custom subnet mode, then this field should be specified. + - 'This field represents a link to a Subnetwork resource in GCP. It can be + specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_subnetwork task and then set this subnetwork + field to "{{ name-of-resource }}"' + required: false + type: dict + scheduling: + description: + - Sets the scheduling options for this instance. 
+ required: false + type: dict + suboptions: + automatic_restart: + description: + - Specifies whether the instance should be automatically restarted if it is + terminated by Compute Engine (not terminated by a user). + - You can only set the automatic restart option for standard instances. Preemptible + instances cannot be automatically restarted. + required: false + type: bool + on_host_maintenance: + description: + - Defines the maintenance behavior for this instance. For standard instances, + the default behavior is MIGRATE. For preemptible instances, the default + and only possible behavior is TERMINATE. + - For more information, see Setting Instance Scheduling Options. + required: false + type: str + preemptible: + description: + - Defines whether the instance is preemptible. This can only be set during + instance creation, it cannot be set or changed after the instance has been + created. + required: false + type: bool + service_accounts: + description: + - A list of service accounts, with their specified scopes, authorized for this + instance. Only one service account per VM instance is supported. + elements: dict + required: false + type: list + suboptions: + email: + description: + - Email address of the service account. + required: false + type: str + scopes: + description: + - The list of scopes to be made available for this service account. + elements: str + required: false + type: list + shielded_instance_config: + description: + - Configuration for various parameters related to shielded instances. + required: false + type: dict + suboptions: + enable_secure_boot: + description: + - Defines whether the instance has Secure Boot enabled. + required: false + type: bool + enable_vtpm: + description: + - Defines whether the instance has the vTPM enabled. + required: false + type: bool + enable_integrity_monitoring: + description: + - Defines whether the instance has integrity monitoring enabled. 
+ required: false + type: bool + confidential_instance_config: + description: + - Configuration for confidential computing (requires setting the machine type + to any of the n2d-* types and a boot disk of type pd-ssd). + required: false + type: dict + suboptions: + enable_confidential_compute: + description: + - Enables confidential computing. + required: false + type: bool + status: + description: + - 'The status of the instance. One of the following values: PROVISIONING, STAGING, + RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.' + - As a user, use RUNNING to keep a machine "on" and TERMINATED to turn a machine + off . + - 'Some valid choices include: "PROVISIONING", "STAGING", "RUNNING", "STOPPING", + "SUSPENDING", "SUSPENDED", "TERMINATED"' + required: false + type: str + tags: + description: + - A list of tags to apply to this instance. Tags are used to identify valid sources + or targets for network firewalls and are specified by the client during instance + creation. The tags can be later modified by the setTags method. Each tag within + the list must comply with RFC1035. + required: false + type: dict + suboptions: + fingerprint: + description: + - Specifies a fingerprint for this request, which is essentially a hash of + the metadata's contents and used for optimistic locking. + - The fingerprint is initially generated by Compute Engine and changes after + every request to modify or update metadata. You must always provide an up-to-date + fingerprint hash in order to update or change metadata. + required: false + type: str + items: + description: + - An array of tags. Each tag must be 1-63 characters long, and comply with + RFC1035. + elements: str + required: false + type: list + zone: + description: + - A reference to the zone where the machine resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
- name: create an address
startup-script-url: gs://graphite-playground/bootstrap.sh
This name can + be used to reference the device for mounting, resizing, and so on, from within + the instance. + returned: success + type: str + diskEncryptionKey: + description: + - Encrypts or decrypts a disk using a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + rsaEncryptedKey: + description: + - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied + encryption key to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + index: + description: + - Assigns a zero-based index to this disk, where 0 is reserved for the boot + disk. For example, if you have many disks attached to an instance, each disk + would have a unique index number. If not specified, the server will choose + an appropriate value. + returned: success + type: int + initializeParams: + description: + - Specifies the parameters for a new disk that will be created alongside the + new instance. Use initialization parameters to create boot disks or local + SSDs attached to the new instance. + returned: success + type: complex + contains: + diskName: + description: + - Specifies the disk name. If not specified, the default is to use the name + of the instance. + returned: success + type: str + diskSizeGb: + description: + - Specifies the size of the disk in base-2 GB. + returned: success + type: int + diskType: + description: + - Reference to a disk type. + - Specifies the disk type to use to create the instance. + - If not specified, the default is pd-standard. + returned: success + type: str + sourceImage: + description: + - The source image to create this disk. 
When creating a new instance, one + of initializeParams.sourceImage or disks.source is required. To create + a disk with one of the public operating system images, specify the image + by its family name. + returned: success + type: str + sourceImageEncryptionKey: + description: + - The customer-supplied encryption key of the source image. Required if + the source image is protected by a customer-supplied encryption key. + - Instance templates do not store customer-supplied encryption keys, so + you cannot create disks for instances in a managed instance group if the + source images are encrypted with your own keys. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC + 4648 base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + returned: success + type: str + interface: + description: + - Specifies the disk interface to use for attaching this disk, which is either + SCSI or NVME. The default is SCSI. + - Persistent disks must always use SCSI and the request will fail if you attempt + to attach a persistent disk in any other format than SCSI. + returned: success + type: str + mode: + description: + - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If + not specified, the default is to attach the disk in READ_WRITE mode. + returned: success + type: str + source: + description: + - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage + or disks.source is required. + - If desired, you can also attach existing non-root persistent disks using this + property. This field is only applicable for persistent disks. + returned: success + type: dict + type: + description: + - Specifies the type of the disk, either SCRATCH or PERSISTENT. 
If not specified, + the default is PERSISTENT. + returned: success + type: str +guestAccelerators: + description: + - List of the type and count of accelerator cards attached to the instance . + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of the guest accelerator cards exposed to this instance. + returned: success + type: int + acceleratorType: + description: + - Full or partial URL of the accelerator type resource to expose to this instance. + returned: success + type: str +hostname: + description: + - The hostname of the instance to be created. The specified hostname must be RFC1035 + compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal + when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal + when using zonal DNS. + returned: success + type: str +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: int +labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str +labels: + description: + - Labels to apply to this instance. A list of key->value pairs. + returned: success + type: dict +metadata: + description: + - The metadata key/value pairs to assign to instances that are created from this + template. These pairs can consist of custom metadata or predefined keys. + returned: success + type: dict +machineType: + description: + - A reference to a machine type which defines VM kind. See https://cloud.google.com/compute/docs/machine-types + for a list of current valid machine types. + returned: success + type: str +minCpuPlatform: + description: + - Specifies a minimum CPU platform for the VM instance. Applicable values are the + friendly names of CPU platforms . 
+ returned: success + type: str +name: + description: + - The name of the resource, provided by the client when initially creating the resource. + The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +networkInterfaces: + description: + - An array of configurations for this interface. This specifies how this interface + is configured to interact with other network services, such as connecting to the + internet. Only one network interface is supported per instance. + returned: success + type: complex + contains: + accessConfigs: + description: + - An array of configurations for this interface. Currently, only one access + config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, + then this instance will have no external internet access. + returned: success + type: complex + contains: + name: + description: + - The name of this access configuration. The default and recommended name + is External NAT but you can use any arbitrary string you would like. For + example, My external IP or Network Access. + returned: success + type: str + natIP: + description: + - Reference to an address. + - An external IP address associated with this instance. + - Specify an unused static external IP address available to the project + or leave this field undefined to use an IP from a shared ephemeral IP + address pool. If you specify a static external IP address, it must live + in the same region as the zone of the instance. + returned: success + type: dict + type: + description: + - The type of configuration. The default and only option is ONE_TO_ONE_NAT. 
+ returned: success + type: str + setPublicPtr: + description: + - Specifies whether a public DNS PTR record should be created to map the + external IP address of the instance to a DNS domain name. + returned: success + type: bool + publicPtrDomainName: + description: + - The DNS domain name for the public PTR record. You can set this field + only if the setPublicPtr field is enabled. + returned: success + type: str + networkTier: + description: + - This signifies the networking tier used for configuring this access configuration. + If an AccessConfig is specified without a valid external IP address, an + ephemeral IP will be created with this networkTier. If an AccessConfig + with a valid external IP address is specified, it must match that of the + networkTier associated with the Address resource owning that IP. + returned: success + type: str + aliasIpRanges: + description: + - An array of alias IP ranges for this network interface. Can only be specified + for network interfaces on subnet-mode networks. + returned: success + type: complex + contains: + ipCidrRange: + description: + - The IP CIDR range represented by this alias IP range. + - This IP CIDR range must belong to the specified subnetwork and cannot + contain IP addresses reserved by system or used by other network interfaces. + This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. + /24) or a CIDR format string (e.g. 10.1.2.0/24). + returned: success + type: str + subnetworkRangeName: + description: + - Optional subnetwork secondary range name specifying the secondary range + from which to allocate the IP CIDR range for this alias IP range. If left + unspecified, the primary range of the subnetwork will be used. + returned: success + type: str + name: + description: + - The name of the network interface, generated by the server. For network devices, + these are eth0, eth1, etc . + returned: success + type: str + network: + description: + - Specifies the title of an existing network. 
Not setting the network title + will select the default network interface, which could have SSH already configured + . + returned: success + type: dict + networkIP: + description: + - An IPv4 internal network address to assign to the instance for this network + interface. If not specified by the user, an unused internal IP is assigned + by the system. + returned: success + type: str + subnetwork: + description: + - Reference to a VPC network. + - If the network resource is in legacy mode, do not provide this property. If + the network is in auto subnet mode, providing the subnetwork is optional. + If the network is in custom subnet mode, then this field should be specified. + returned: success + type: dict +scheduling: + description: + - Sets the scheduling options for this instance. + returned: success + type: complex + contains: + automaticRestart: + description: + - Specifies whether the instance should be automatically restarted if it is + terminated by Compute Engine (not terminated by a user). + - You can only set the automatic restart option for standard instances. Preemptible + instances cannot be automatically restarted. + returned: success + type: bool + onHostMaintenance: + description: + - Defines the maintenance behavior for this instance. For standard instances, + the default behavior is MIGRATE. For preemptible instances, the default and + only possible behavior is TERMINATE. + - For more information, see Setting Instance Scheduling Options. + returned: success + type: str + preemptible: + description: + - Defines whether the instance is preemptible. This can only be set during instance + creation, it cannot be set or changed after the instance has been created. + returned: success + type: bool +serviceAccounts: + description: + - A list of service accounts, with their specified scopes, authorized for this instance. + Only one service account per VM instance is supported. 
+ returned: success + type: complex + contains: + email: + description: + - Email address of the service account. + returned: success + type: str + scopes: + description: + - The list of scopes to be made available for this service account. + returned: success + type: list +shieldedInstanceConfig: + description: + - Configuration for various parameters related to shielded instances. + returned: success + type: complex + contains: + enableSecureBoot: + description: + - Defines whether the instance has Secure Boot enabled. + returned: success + type: bool + enableVtpm: + description: + - Defines whether the instance has the vTPM enabled. + returned: success + type: bool + enableIntegrityMonitoring: + description: + - Defines whether the instance has integrity monitoring enabled. + returned: success + type: bool +confidentialInstanceConfig: + description: + - Configuration for confidential computing (requires setting the machine type to + any of the n2d-* types and a boot disk of type pd-ssd). + returned: success + type: complex + contains: + enableConfidentialCompute: + description: + - Enables confidential computing. + returned: success + type: bool +status: + description: + - 'The status of the instance. One of the following values: PROVISIONING, STAGING, + RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.' + - As a user, use RUNNING to keep a machine "on" and TERMINATED to turn a machine + off . + returned: success + type: str +statusMessage: + description: + - An optional, human-readable explanation of the status. + returned: success + type: str +tags: + description: + - A list of tags to apply to this instance. Tags are used to identify valid sources + or targets for network firewalls and are specified by the client during instance + creation. The tags can be later modified by the setTags method. Each tag within + the list must comply with RFC1035. 
+ returned: success + type: complex + contains: + fingerprint: + description: + - Specifies a fingerprint for this request, which is essentially a hash of the + metadata's contents and used for optimistic locking. + - The fingerprint is initially generated by Compute Engine and changes after + every request to modify or update metadata. You must always provide an up-to-date + fingerprint hash in order to update or change metadata. + returned: success + type: str + items: + description: + - An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. + returned: success + type: list +zone: + description: + - A reference to the zone where the machine resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + can_ip_forward=dict(type='bool', aliases=['ip_forward']), + deletion_protection=dict(type='bool'), + disks=dict( + type='list', + elements='dict', + options=dict( + auto_delete=dict(type='bool'), + boot=dict(type='bool'), + device_name=dict(type='str'), + disk_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'), rsa_encrypted_key=dict(type='str'))), + index=dict(type='int'), + initialize_params=dict( + type='dict', + options=dict( + disk_name=dict(type='str'), + disk_size_gb=dict(type='int'), + 
disk_type=dict(type='str'), + source_image=dict(type='str', aliases=['image', 'image_family']), + source_image_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))), + ), + ), + interface=dict(type='str'), + mode=dict(type='str'), + source=dict(type='dict'), + type=dict(type='str'), + ), + ), + guest_accelerators=dict(type='list', elements='dict', options=dict(accelerator_count=dict(type='int'), accelerator_type=dict(type='str'))), + hostname=dict(type='str'), + labels=dict(type='dict'), + metadata=dict(type='dict'), + machine_type=dict(type='str'), + min_cpu_platform=dict(type='str'), + name=dict(type='str'), + network_interfaces=dict( + type='list', + elements='dict', + options=dict( + access_configs=dict( + type='list', + elements='dict', + options=dict( + name=dict(required=True, type='str'), + nat_ip=dict(type='dict'), + type=dict(required=True, type='str'), + set_public_ptr=dict(type='bool'), + public_ptr_domain_name=dict(type='str'), + network_tier=dict(type='str'), + ), + ), + alias_ip_ranges=dict(type='list', elements='dict', options=dict(ip_cidr_range=dict(type='str'), subnetwork_range_name=dict(type='str'))), + network=dict(type='dict'), + network_ip=dict(type='str'), + subnetwork=dict(type='dict'), + ), + ), + scheduling=dict( + type='dict', options=dict(automatic_restart=dict(type='bool'), on_host_maintenance=dict(type='str'), preemptible=dict(type='bool')) + ), + service_accounts=dict(type='list', elements='dict', options=dict(email=dict(type='str'), scopes=dict(type='list', elements='str'))), + shielded_instance_config=dict( + type='dict', options=dict(enable_secure_boot=dict(type='bool'), enable_vtpm=dict(type='bool'), enable_integrity_monitoring=dict(type='bool')) + ), + confidential_instance_config=dict(type='dict', options=dict(enable_confidential_compute=dict(type='bool'))), + status=dict(type='str'), + tags=dict(type='dict', options=dict(fingerprint=dict(type='str'), items=dict(type='list', elements='str'))), + 
def update_fields(module, request, response):
    """Apply partial updates for fields that changed between the desired
    request and the instance's current state.

    Each of these fields has its own setter endpoint on the Compute API, so
    the matching updater is invoked individually for every field whose value
    differs. Handler names are resolved lazily, only when a field actually
    changed, and the original invocation order is preserved.
    """

    def differs(field):
        # A field needs updating when the desired and current values disagree.
        return response.get(field) != request.get(field)

    if differs('deletionProtection'):
        deletion_protection_update(module, request, response)
    if differs('labels'):
        label_fingerprint_update(module, request, response)
    if differs('machineType'):
        machine_type_update(module, request, response)
    if differs('shieldedInstanceConfig'):
        shielded_instance_config_update(module, request, response)
def delete(module, link, kind):
    """Delete the instance at ``link`` and block until the resulting zone
    operation finishes, returning the final payload (empty dict if none)."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.delete(link))
def self_link(module):
    """Build the canonical selfLink URL of this instance resource from the
    module's ``project``, ``zone`` and ``name`` parameters."""
    template = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}"
    return template.format(**module.params)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    # Project the raw API response down to only the fields this module manages,
    # normalizing nested objects through their *Array/config serializers so the
    # result can be diffed against the request built from module.params.
    return {
        u'canIpForward': response.get(u'canIpForward'),
        u'cpuPlatform': response.get(u'cpuPlatform'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'deletionProtection': response.get(u'deletionProtection'),
        # NOTE(review): 'disks' is built from module.params (request form) via
        # to_request(), not from the API response like the other nested fields
        # -- presumably because the attached-disk representation returned by the
        # API cannot be compared field-for-field; confirm before changing.
        u'disks': InstanceDisksArray(module.params.get('disks', []), module).to_request(),
        u'guestAccelerators': InstanceGuestacceleratorsArray(response.get(u'guestAccelerators', []), module).from_response(),
        u'hostname': response.get(u'hostname'),
        u'id': response.get(u'id'),
        u'labelFingerprint': response.get(u'labelFingerprint'),
        u'labels': response.get(u'labels'),
        u'metadata': response.get(u'metadata'),
        u'machineType': response.get(u'machineType'),
        u'minCpuPlatform': response.get(u'minCpuPlatform'),
        u'name': response.get(u'name'),
        u'networkInterfaces': InstanceNetworkinterfacesArray(response.get(u'networkInterfaces', []), module).from_response(),
        u'scheduling': InstanceScheduling(response.get(u'scheduling', {}), module).from_response(),
        u'serviceAccounts': InstanceServiceaccountsArray(response.get(u'serviceAccounts', []), module).from_response(),
        u'shieldedInstanceConfig': InstanceShieldedinstanceconfig(response.get(u'shieldedInstanceConfig', {}), module).from_response(),
        u'confidentialInstanceConfig': InstanceConfidentialinstanceconfig(response.get(u'confidentialInstanceConfig', {}), module).from_response(),
        u'status': response.get(u'status'),
        u'statusMessage': response.get(u'statusMessage'),
        u'tags': InstanceTags(response.get(u'tags', {}), module).from_response(),
    }
def machine_type_selflink(name, params):
    """Expand a bare machine-type name into its full selfLink URL.

    A value already matching the machineTypes URL pattern is returned
    untouched, and ``None`` passes through unchanged; otherwise the name is
    appended to the project/zone machineTypes prefix built from ``params``.
    """
    if name is None:
        return
    pattern = r"https://www.googleapis.com/compute/v1/projects/.*/zones/.*/machineTypes/.*"
    if re.match(pattern, name):
        return name
    prefix = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/machineTypes/%s".format(**params)
    return prefix % name
# Map metadata.items[]{key:,value:} => metadata[key]=value
def metadata_decoder(metadata):
    """Flatten the API's metadata NestedObject
    (``{'items': [{'key': ..., 'value': ...}, ...]}``) into a plain
    ``{key: value}`` hash; a missing ``items`` entry yields an empty dict."""
    entries = metadata.get('items', [])
    return {entry['key']: entry['value'] for entry in entries}
_start_url(self): + return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}/start".format(**self.module.params) + + def _stop_url(self): + return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}/stop".format(**self.module.params) + + +def deletion_protection_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join( + [ + "https://www.googleapis.com/compute/v1/", + "projects/{project}/zones/{zone}/instances/{name}/setDeletionProtection?deletionProtection={deletion_protection}", + ] + ).format(**module.params), + {}, + ) + + +def shielded_instance_config_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/instances/{name}/updateShieldedInstanceConfig"]).format( + **module.params + ), + { + u'enableSecureBoot': navigate_hash(module.params, ['shielded_instance_config', 'enable_secure_boot']), + u'enableVtpm': navigate_hash(module.params, ['shielded_instance_config', 'enable_vtpm']), + u'enableIntegrityMonitoring': navigate_hash(module.params, ['shielded_instance_config', 'enable_integrity_monitoring']), + }, + ) + + +class InstanceDisksArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'autoDelete': item.get('auto_delete'), + u'boot': item.get('boot'), + u'deviceName': item.get('device_name'), + u'diskEncryptionKey': InstanceDiskencryptionkey(item.get('disk_encryption_key', {}), self.module).to_request(), + 
u'index': item.get('index'), + u'initializeParams': InstanceInitializeparams(item.get('initialize_params', {}), self.module).to_request(), + u'interface': item.get('interface'), + u'mode': item.get('mode'), + u'source': replace_resource_dict(item.get(u'source', {}), 'selfLink'), + u'type': item.get('type'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'autoDelete': item.get(u'autoDelete'), + u'boot': item.get(u'boot'), + u'deviceName': item.get(u'deviceName'), + u'diskEncryptionKey': InstanceDiskencryptionkey(item.get(u'diskEncryptionKey', {}), self.module).from_response(), + u'index': item.get(u'index'), + u'initializeParams': InstanceInitializeparams(self.module.params.get('initialize_params', {}), self.module).to_request(), + u'interface': item.get(u'interface'), + u'mode': item.get(u'mode'), + u'source': item.get(u'source'), + u'type': item.get(u'type'), + } + ) + + +class InstanceDiskencryptionkey(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'rawKey': self.request.get('raw_key'), u'rsaEncryptedKey': self.request.get('rsa_encrypted_key')}) + + def from_response(self): + return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey'), u'rsaEncryptedKey': self.request.get(u'rsaEncryptedKey')}) + + +class InstanceInitializeparams(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'diskName': self.request.get('disk_name'), + u'diskSizeGb': self.request.get('disk_size_gb'), + u'diskType': disk_type_selflink(self.request.get('disk_type'), self.module.params), + u'sourceImage': self.request.get('source_image'), + u'sourceImageEncryptionKey': 
InstanceSourceimageencryptionkey(self.request.get('source_image_encryption_key', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'diskName': self.request.get(u'diskName'), + u'diskSizeGb': self.request.get(u'diskSizeGb'), + u'diskType': self.request.get(u'diskType'), + u'sourceImage': self.request.get(u'sourceImage'), + u'sourceImageEncryptionKey': InstanceSourceimageencryptionkey(self.request.get(u'sourceImageEncryptionKey', {}), self.module).from_response(), + } + ) + + +class InstanceSourceimageencryptionkey(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')}) + + def from_response(self): + return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')}) + + +class InstanceGuestacceleratorsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'acceleratorCount': item.get('accelerator_count'), u'acceleratorType': item.get('accelerator_type')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'acceleratorCount': item.get(u'acceleratorCount'), u'acceleratorType': item.get(u'acceleratorType')}) + + +class InstanceNetworkinterfacesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + 
items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'accessConfigs': InstanceAccessconfigsArray(item.get('access_configs', []), self.module).to_request(), + u'aliasIpRanges': InstanceAliasiprangesArray(item.get('alias_ip_ranges', []), self.module).to_request(), + u'network': replace_resource_dict(item.get(u'network', {}), 'selfLink'), + u'networkIP': item.get('network_ip'), + u'subnetwork': replace_resource_dict(item.get(u'subnetwork', {}), 'selfLink'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'accessConfigs': InstanceAccessconfigsArray(item.get(u'accessConfigs', []), self.module).from_response(), + u'aliasIpRanges': InstanceAliasiprangesArray(item.get(u'aliasIpRanges', []), self.module).from_response(), + u'network': item.get(u'network'), + u'networkIP': item.get(u'networkIP'), + u'subnetwork': item.get(u'subnetwork'), + } + ) + + +class InstanceAccessconfigsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'name': item.get('name'), + u'natIP': replace_resource_dict(item.get(u'nat_ip', {}), 'address'), + u'type': item.get('type'), + u'setPublicPtr': item.get('set_public_ptr'), + u'publicPtrDomainName': item.get('public_ptr_domain_name'), + u'networkTier': item.get('network_tier'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'name': 
item.get(u'name'), + u'natIP': item.get(u'natIP'), + u'type': item.get(u'type'), + u'setPublicPtr': item.get(u'setPublicPtr'), + u'publicPtrDomainName': item.get(u'publicPtrDomainName'), + u'networkTier': item.get(u'networkTier'), + } + ) + + +class InstanceAliasiprangesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'ipCidrRange': item.get('ip_cidr_range'), u'subnetworkRangeName': item.get('subnetwork_range_name')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'ipCidrRange': item.get(u'ipCidrRange'), u'subnetworkRangeName': item.get(u'subnetworkRangeName')}) + + +class InstanceScheduling(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'automaticRestart': self.request.get('automatic_restart'), + u'onHostMaintenance': self.request.get('on_host_maintenance'), + u'preemptible': self.request.get('preemptible'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'automaticRestart': self.request.get(u'automaticRestart'), + u'onHostMaintenance': self.request.get(u'onHostMaintenance'), + u'preemptible': self.request.get(u'preemptible'), + } + ) + + +class InstanceServiceaccountsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + 
+ def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'email': item.get('email'), u'scopes': item.get('scopes')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'email': item.get(u'email'), u'scopes': item.get(u'scopes')}) + + +class InstanceShieldedinstanceconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'enableSecureBoot': self.request.get('enable_secure_boot'), + u'enableVtpm': self.request.get('enable_vtpm'), + u'enableIntegrityMonitoring': self.request.get('enable_integrity_monitoring'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'enableSecureBoot': self.request.get(u'enableSecureBoot'), + u'enableVtpm': self.request.get(u'enableVtpm'), + u'enableIntegrityMonitoring': self.request.get(u'enableIntegrityMonitoring'), + } + ) + + +class InstanceConfidentialinstanceconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'enableConfidentialCompute': self.request.get('enable_confidential_compute')}) + + def from_response(self): + return remove_nones_from_dict({u'enableConfidentialCompute': self.request.get(u'enableConfidentialCompute')}) + + +class InstanceTags(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'fingerprint': self.request.get('fingerprint'), u'items': self.request.get('items')}) + + def from_response(self): + return remove_nones_from_dict({u'fingerprint': self.request.get(u'fingerprint'), 
u'items': self.request.get(u'items')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group.py new file mode 100644 index 000000000..419e78845 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group.py @@ -0,0 +1,570 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_instance_group +description: +- Represents an Instance Group resource. Instance groups are self-managed and can + contain identical or different instances. Instance groups do not use an instance + template. Unlike managed instance groups, you must create and add instances to an + instance group manually. +short_description: Creates a GCP InstanceGroup +author: Google Inc. 
(@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + name: + description: + - The name of the instance group. + - The name must be 1-63 characters long, and comply with RFC1035. + required: false + type: str + named_ports: + description: + - Assigns a name to a port number. + - 'For example: {name: "http", port: 80}.' + - This allows the system to reference ports by the assigned name instead of a + port number. Named ports can also contain multiple ports. + - 'For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named ports + apply to all instances in this instance group.' + elements: dict + required: false + type: list + suboptions: + name: + description: + - The name for this named port. + - The name must be 1-63 characters long, and comply with RFC1035. + required: false + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + required: false + type: int + network: + description: + - The network to which all instances in the instance group belong. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: false + type: dict + region: + description: + - The region where the instance group is located (for regional resources). + required: false + type: str + subnetwork: + description: + - The subnetwork to which all instances in the instance group belong. 
+ - 'This field represents a link to a Subnetwork resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_subnetwork task and then set this subnetwork field to "{{ name-of-resource + }}"' + required: false + type: dict + zone: + description: + - A reference to the zone where the instance group resides. + required: true + type: str + instances: + description: + - The list of instances associated with this InstanceGroup. + - All instances must be created before being added to an InstanceGroup. + - All instances not in this list will be removed from the InstanceGroup and will + not be deleted. + - Only the full identifier of the instance will be returned. + elements: dict + required: false + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-instancegroup + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: test_object + named_ports: + - name: ansible + port: 1234 + network: "{{ network }}" + zone: us-central1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +id: + description: + - A unique identifier for this instance group. + returned: success + type: int +name: + description: + - The name of the instance group. + - The name must be 1-63 characters long, and comply with RFC1035. + returned: success + type: str +namedPorts: + description: + - Assigns a name to a port number. + - 'For example: {name: "http", port: 80}.' + - This allows the system to reference ports by the assigned name instead of a port + number. Named ports can also contain multiple ports. + - 'For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named ports + apply to all instances in this instance group.' + returned: success + type: complex + contains: + name: + description: + - The name for this named port. + - The name must be 1-63 characters long, and comply with RFC1035. + returned: success + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + returned: success + type: int +network: + description: + - The network to which all instances in the instance group belong. 
+ returned: success + type: dict +region: + description: + - The region where the instance group is located (for regional resources). + returned: success + type: str +subnetwork: + description: + - The subnetwork to which all instances in the instance group belong. + returned: success + type: dict +zone: + description: + - A reference to the zone where the instance group resides. + returned: success + type: str +instances: + description: + - The list of instances associated with this InstanceGroup. + - All instances must be created before being added to an InstanceGroup. + - All instances not in this list will be removed from the InstanceGroup and will + not be deleted. + - Only the full identifier of the instance will be returned. + returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(type='str'), + named_ports=dict(type='list', elements='dict', options=dict(name=dict(type='str'), port=dict(type='int'))), + network=dict(type='dict'), + region=dict(type='str'), + subnetwork=dict(type='dict'), + zone=dict(required=True, type='str'), + instances=dict(type='list', elements='dict'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = 
module.params['state'] + kind = 'compute#instanceGroup' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + if fetch: + instance = InstanceLogic(module) + instance.run() + fetch.update({'instances': instance.list_instances()}) + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + instance = InstanceLogic(module) + instance.run() + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#instanceGroup', + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'namedPorts': InstanceGroupNamedportsArray(module.params.get('named_ports', []), module).to_request(), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + u'region': region_selflink(module.params.get('region'), module.params), + u'subnetwork': replace_resource_dict(module.params.get(u'subnetwork', {}), 'selfLink'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return 
"https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'id': response.get(u'id'), + u'name': response.get(u'name'), + u'namedPorts': InstanceGroupNamedportsArray(response.get(u'namedPorts', []), module).from_response(), + u'network': response.get(u'network'), + u'region': response.get(u'region'), + u'subnetwork': response.get(u'subnetwork'), + } + + +def region_selflink(name, params): + if name is None: + return + url = r"https://compute.googleapis.com/compute/v1/projects/.*/regions/.*" + if not re.match(url, name): + name = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name + return name + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instanceGroup') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class InstanceLogic(object): + def 
__init__(self, module): + self.module = module + self.current_instances = self.list_instances() + self.module_instances = [] + + # Transform module list of instances (dicts of instance responses) into a list of selfLinks. + instances = self.module.params.get('instances') + if instances: + for instance in instances: + self.module_instances.append(replace_resource_dict(instance, 'selfLink')) + + def run(self): + # Find all instances to add and add them + instances_to_add = list(set(self.module_instances) - set(self.current_instances)) + if instances_to_add: + self.add_instances(instances_to_add) + + # Find all instances to remove and remove them + instances_to_remove = list(set(self.current_instances) - set(self.module_instances)) + if instances_to_remove: + self.remove_instances(instances_to_remove) + + def list_instances(self): + auth = GcpSession(self.module, 'compute') + response = return_if_object(self.module, auth.post(self._list_instances_url(), {'instanceState': 'ALL'}), 'compute#instanceGroupsListInstances') + + # Transform instance list into a list of selfLinks for diffing with module parameters + instances = [] + for instance in response.get('items', []): + instances.append(instance['instance']) + return instances + + def add_instances(self, instances): + auth = GcpSession(self.module, 'compute') + wait_for_operation(self.module, auth.post(self._add_instances_url(), self._build_request(instances))) + + def remove_instances(self, instances): + auth = GcpSession(self.module, 'compute') + wait_for_operation(self.module, auth.post(self._remove_instances_url(), self._build_request(instances))) + + def _list_instances_url(self): + return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}/listInstances".format(**self.module.params) + + def _remove_instances_url(self): + return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}/removeInstances".format(**self.module.params) + + def 
_add_instances_url(self): + return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}/addInstances".format(**self.module.params) + + def _build_request(self, instances): + request = {'instances': []} + for instance in instances: + request['instances'].append({'instance': instance}) + return request + + +class InstanceGroupNamedportsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'name': item.get('name'), u'port': item.get('port')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'name': item.get(u'name'), u'port': item.get(u'port')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group_info.py new file mode 100644 index 000000000..6a3ec0c3b --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group_info.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_instance_group_info +description: +- Gather info for GCP InstanceGroup +short_description: Gather info for GCP InstanceGroup +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + zone: + description: + - A reference to the zone where the instance group resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance group + gcp_compute_instance_group_info: + zone: us-central1-a + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - A unique identifier for this instance group. 
+ returned: success + type: int + name: + description: + - The name of the instance group. + - The name must be 1-63 characters long, and comply with RFC1035. + returned: success + type: str + namedPorts: + description: + - Assigns a name to a port number. + - 'For example: {name: "http", port: 80}.' + - This allows the system to reference ports by the assigned name instead of + a port number. Named ports can also contain multiple ports. + - 'For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named + ports apply to all instances in this instance group.' + returned: success + type: complex + contains: + name: + description: + - The name for this named port. + - The name must be 1-63 characters long, and comply with RFC1035. + returned: success + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + returned: success + type: int + network: + description: + - The network to which all instances in the instance group belong. + returned: success + type: dict + region: + description: + - The region where the instance group is located (for regional resources). + returned: success + type: str + subnetwork: + description: + - The subnetwork to which all instances in the instance group belong. + returned: success + type: dict + zone: + description: + - A reference to the zone where the instance group resides. + returned: success + type: str + instances: + description: + - The list of instances associated with this InstanceGroup. + - All instances must be created before being added to an InstanceGroup. + - All instances not in this list will be removed from the InstanceGroup and + will not be deleted. + - Only the full identifier of the instance will be returned. 
def main():
    """Entry point: list InstanceGroup resources in a zone, honoring filters."""
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str')))

    # Default to the compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
    module.exit_json(**return_value)


def collection(module):
    """Return the zonal instanceGroups collection URL for this module's params."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups".format(**module.params)


def fetch_list(module, link, query):
    """Fetch every page of results from `link`, filtered by `query`."""
    auth = GcpSession(module, 'compute')
    return auth.list(link, return_if_object, array_name='items', params={'filter': query})


def query_options(filters):
    """Combine user-supplied filter expressions into one API filter string.

    A single filter passes through untouched; multiple filters are each
    parenthesized (unless already fully parenthesized) and joined with
    spaces, which the compute API treats as AND.
    """
    if not filters:
        return ''

    if len(filters) == 1:
        return filters[0]

    queries = []
    for f in filters:
        # For multiple queries, all queries should have ().
        # FIX: the generated code wrapped f in ''.join(f), which is a
        # no-op on a string; removed for clarity (behavior unchanged).
        if f[0] != '(' and f[-1] != ')':
            queries.append("(%s)" % f)
        else:
            queries.append(f)

    return ' '.join(queries)


def return_if_object(module, response):
    """Decode a JSON API response, tolerating 404/204, failing on API errors."""
    # If not found, return nothing.
    if response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


if __name__ == "__main__":
    main()
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_instance_group_manager +description: +- Creates a managed instance group using the information that you specify in the request. + After the group is created, it schedules an action to create instances in the group + using the specified instance template. This operation is marked as DONE when the + group is created even if the instances in the group have not yet been created. You + must separately verify the status of the individual instances. +- A managed instance group can have up to 1000 VM instances per group. +short_description: Creates a GCP InstanceGroupManager +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + base_instance_name: + description: + - The base instance name to use for instances in this group. The value must be + 1-58 characters long. Instances are named by appending a hyphen and a random + four-character string to the base instance name. + - The base instance name must comply with RFC1035. + required: true + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. 
+ required: false + type: str + instance_template: + description: + - The instance template that is specified for this managed instance group. The + group uses this template to create all new instances in the managed instance + group. + - 'This field represents a link to a InstanceTemplate resource in GCP. It can + be specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_instance_template task and then set this + instance_template field to "{{ name-of-resource }}"' + required: true + type: dict + name: + description: + - The name of the managed instance group. The name must be 1-63 characters long, + and comply with RFC1035. + required: true + type: str + named_ports: + description: + - Named ports configured for the Instance Groups complementary to this Instance + Group Manager. + elements: dict + required: false + type: list + suboptions: + name: + description: + - The name for this named port. The name must be 1-63 characters long, and + comply with RFC1035. + required: false + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + required: false + type: int + target_pools: + description: + - TargetPool resources to which instances in the instanceGroup field are added. + The target pools automatically apply to all of the instances in the managed + instance group. + elements: dict + required: false + type: list + target_size: + description: + - The target number of running instances for this managed instance group. Deleting + or abandoning instances reduces this number. Resizing the group changes this + number. + required: false + type: int + zone: + description: + - The zone the managed instance group resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-instancetemplate + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a address + google.cloud.gcp_compute_address: + name: address-instancetemplate + region: us-west1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: address + +- name: create a instance template + google.cloud.gcp_compute_instance_template: + name: "{{ resource_name }}" + properties: + disks: + - auto_delete: 'true' + boot: 'true' + initialize_params: + source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts + machine_type: n1-standard-1 + network_interfaces: + - network: "{{ network }}" + access_configs: + - name: test-config + type: ONE_TO_ONE_NAT + nat_ip: "{{ address }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" 
+ state: present + register: instancetemplate + +- name: create a instance group manager + google.cloud.gcp_compute_instance_group_manager: + name: test_object + base_instance_name: test1-child + instance_template: "{{ instancetemplate }}" + target_size: 3 + zone: us-west1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +baseInstanceName: + description: + - The base instance name to use for instances in this group. The value must be 1-58 + characters long. Instances are named by appending a hyphen and a random four-character + string to the base instance name. + - The base instance name must comply with RFC1035. + returned: success + type: str +creationTimestamp: + description: + - The creation timestamp for this managed instance group in RFC3339 text format. + returned: success + type: str +currentActions: + description: + - The list of instance actions and the number of instances in this managed instance + group that are scheduled for each of those actions. + returned: success + type: complex + contains: + abandoning: + description: + - The total number of instances in the managed instance group that are scheduled + to be abandoned. Abandoning an instance removes it from the managed instance + group without deleting it. + returned: success + type: int + creating: + description: + - The number of instances in the managed instance group that are scheduled to + be created or are currently being created. If the group fails to create any + of these instances, it tries again until it creates the instance successfully. + - If you have disabled creation retries, this field will not be populated; instead, + the creatingWithoutRetries field will be populated. + returned: success + type: int + creatingWithoutRetries: + description: + - The number of instances that the managed instance group will attempt to create. + The group attempts to create each instance only once. 
If the group fails to + create any of these instances, it decreases the group's targetSize value accordingly. + returned: success + type: int + deleting: + description: + - The number of instances in the managed instance group that are scheduled to + be deleted or are currently being deleted. + returned: success + type: int + none: + description: + - The number of instances in the managed instance group that are running and + have no scheduled actions. + returned: success + type: int + recreating: + description: + - The number of instances in the managed instance group that are scheduled to + be recreated or are currently being being recreated. + - Recreating an instance deletes the existing root persistent disk and creates + a new disk from the image that is defined in the instance template. + returned: success + type: int + refreshing: + description: + - The number of instances in the managed instance group that are being reconfigured + with properties that do not require a restart or a recreate action. For example, + setting or removing target pools for the instance. + returned: success + type: int + restarting: + description: + - The number of instances in the managed instance group that are scheduled to + be restarted or are currently being restarted. + returned: success + type: int +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +id: + description: + - A unique identifier for this resource. + returned: success + type: int +instanceGroup: + description: + - The instance group being managed. + returned: success + type: dict +instanceTemplate: + description: + - The instance template that is specified for this managed instance group. The group + uses this template to create all new instances in the managed instance group. + returned: success + type: dict +name: + description: + - The name of the managed instance group. 
The name must be 1-63 characters long, + and comply with RFC1035. + returned: success + type: str +namedPorts: + description: + - Named ports configured for the Instance Groups complementary to this Instance + Group Manager. + returned: success + type: complex + contains: + name: + description: + - The name for this named port. The name must be 1-63 characters long, and comply + with RFC1035. + returned: success + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + returned: success + type: int +region: + description: + - The region this managed instance group resides (for regional resources). + returned: success + type: str +targetPools: + description: + - TargetPool resources to which instances in the instanceGroup field are added. + The target pools automatically apply to all of the instances in the managed instance + group. + returned: success + type: list +targetSize: + description: + - The target number of running instances for this managed instance group. Deleting + or abandoning instances reduces this number. Resizing the group changes this number. + returned: success + type: int +zone: + description: + - The zone the managed instance group resides. 
def main():
    """Reconcile a zonal InstanceGroupManager with the requested state."""
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            base_instance_name=dict(required=True, type='str'),
            description=dict(type='str'),
            instance_template=dict(required=True, type='dict'),
            name=dict(required=True, type='str'),
            named_ports=dict(type='list', elements='dict', options=dict(name=dict(type='str'), port=dict(type='int'))),
            target_pools=dict(type='list', elements='dict'),
            target_size=dict(type='int'),
            zone=dict(required=True, type='str'),
        )
    )

    # Default to the compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#instanceGroupManager'

    resource = fetch_resource(module, self_link(module), kind)
    changed = False

    if resource and state == 'present':
        # Exists and should exist: update only when the desired config differs.
        if is_different(module, resource):
            update(module, self_link(module), kind)
            resource = fetch_resource(module, self_link(module), kind)
            changed = True
    elif resource:
        # Exists but state == 'absent': remove it.
        delete(module, self_link(module), kind)
        resource = {}
        changed = True
    elif state == 'present':
        # Missing and should exist: create it.
        resource = create(module, collection(module), kind)
        changed = True
    else:
        # Missing and should be absent: nothing to do.
        resource = {}

    resource.update({'changed': changed})
    module.exit_json(**resource)


def create(module, link, kind):
    """POST the resource and block until the zonal operation completes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.post(link, resource_to_request(module)))


def update(module, link, kind):
    """PUT the desired state and block until the zonal operation completes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.put(link, resource_to_request(module)))


def delete(module, link, kind):
    """DELETE the resource and block until the zonal operation completes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.delete(link))


def resource_to_request(module):
    """Build the API request payload from module params, dropping empty values."""
    params = module.params
    request = {
        u'kind': 'compute#instanceGroupManager',
        u'baseInstanceName': params.get('base_instance_name'),
        u'description': params.get('description'),
        u'instanceTemplate': replace_resource_dict(params.get(u'instance_template', {}), 'selfLink'),
        u'name': params.get('name'),
        u'namedPorts': InstanceGroupManagerNamedportsArray(params.get('named_ports', []), module).to_request(),
        u'targetPools': replace_resource_dict(params.get('target_pools', []), 'selfLink'),
        u'targetSize': params.get('target_size'),
    }
    # Keep only populated values; an explicit False must survive the filter.
    return {k: v for k, v in request.items() if v or v is False}


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource at `link`, returning None on 404 when allowed."""
    session = GcpSession(module, 'compute')
    return return_if_object(module, session.get(link), kind, allow_not_found)


def self_link(module):
    """Return this resource's canonical URL built from module params."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{name}".format(**module.params)


def collection(module):
    """Return the zonal instanceGroupManagers collection URL."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode a JSON API response; None for 404 (when allowed) and 204."""
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
def is_different(module, response):
    """Compare desired params against the live resource, ignoring output-only fields."""
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Keep only keys both sides know about, so output-only response fields
    # (and unset request fields) do not trigger spurious diffs.
    response_vals = {k: v for k, v in response.items() if k in request}
    request_vals = {k: v for k, v in request.items() if k in response}

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Project the API response onto the fields Ansible manages."""
    return {
        u'baseInstanceName': response.get(u'baseInstanceName'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'currentActions': InstanceGroupManagerCurrentactions(response.get(u'currentActions', {}), module).from_response(),
        # NOTE: description deliberately mirrors the module param, not the
        # API response (generated-code behavior preserved).
        u'description': module.params.get('description'),
        u'id': response.get(u'id'),
        u'instanceGroup': response.get(u'instanceGroup'),
        u'instanceTemplate': response.get(u'instanceTemplate'),
        u'name': response.get(u'name'),
        u'namedPorts': InstanceGroupManagerNamedportsArray(response.get(u'namedPorts', []), module).from_response(),
        u'region': response.get(u'region'),
        u'targetPools': response.get(u'targetPools'),
        u'targetSize': response.get(u'targetSize'),
    }


def region_selflink(name, params):
    """Expand a bare region name into a full compute selfLink; pass URLs through."""
    if name is None:
        return
    pattern = r"https://compute.googleapis.com/compute/v1/projects/.*/regions/.*"
    if not re.match(pattern, name):
        name = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name
    return name
def async_op_url(module, extra_data=None):
    """Build the zonal operations URL used to poll an async operation."""
    # module.params win over extra_data on key collisions, as before.
    combined = dict(extra_data or {})
    combined.update(module.params)
    return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}".format(**combined)


def wait_for_operation(module, response):
    """Poll a compute operation to DONE, then fetch and return the target resource."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instanceGroupManager')


def wait_for_completion(status, op_result, module):
    """Block (1s poll interval) until the operation reports DONE, failing fast on errors."""
    op_uri = async_op_url(module, {'op_id': navigate_hash(op_result, ['name'])})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module if the operation result carries errors at err_path."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


class InstanceGroupManagerCurrentactions(object):
    """Serializer for the output-only currentActions block.

    The generated schema exposes no writable fields here, so both
    conversions produce an empty dict.
    """

    def __init__(self, request, module):
        self.module = module
        self.request = request if request else {}

    def to_request(self):
        # currentActions is output-only; nothing is ever sent.
        return remove_nones_from_dict({})

    def from_response(self):
        return remove_nones_from_dict({})
_request_for_item(self, item): + return remove_nones_from_dict({u'name': item.get('name'), u'port': item.get('port')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'name': item.get(u'name'), u'port': item.get(u'port')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group_manager_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group_manager_info.py new file mode 100644 index 000000000..47ec986bb --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_group_manager_info.py @@ -0,0 +1,342 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_instance_group_manager_info +description: +- Gather info for GCP InstanceGroupManager +short_description: Gather info for GCP InstanceGroupManager +author: Google Inc. 
(@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + zone: + description: + - The zone the managed instance group resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. 
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance group manager + gcp_compute_instance_group_manager_info: + zone: us-west1-a + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + baseInstanceName: + description: + - The base instance name to use for instances in this group. The value must + be 1-58 characters long. Instances are named by appending a hyphen and a random + four-character string to the base instance name. + - The base instance name must comply with RFC1035. + returned: success + type: str + creationTimestamp: + description: + - The creation timestamp for this managed instance group in RFC3339 text format. + returned: success + type: str + currentActions: + description: + - The list of instance actions and the number of instances in this managed instance + group that are scheduled for each of those actions. + returned: success + type: complex + contains: + abandoning: + description: + - The total number of instances in the managed instance group that are scheduled + to be abandoned. Abandoning an instance removes it from the managed instance + group without deleting it. + returned: success + type: int + creating: + description: + - The number of instances in the managed instance group that are scheduled + to be created or are currently being created. If the group fails to create + any of these instances, it tries again until it creates the instance successfully. + - If you have disabled creation retries, this field will not be populated; + instead, the creatingWithoutRetries field will be populated. 
+ returned: success + type: int + creatingWithoutRetries: + description: + - The number of instances that the managed instance group will attempt to + create. The group attempts to create each instance only once. If the group + fails to create any of these instances, it decreases the group's targetSize + value accordingly. + returned: success + type: int + deleting: + description: + - The number of instances in the managed instance group that are scheduled + to be deleted or are currently being deleted. + returned: success + type: int + none: + description: + - The number of instances in the managed instance group that are running + and have no scheduled actions. + returned: success + type: int + recreating: + description: + - The number of instances in the managed instance group that are scheduled + to be recreated or are currently being being recreated. + - Recreating an instance deletes the existing root persistent disk and creates + a new disk from the image that is defined in the instance template. + returned: success + type: int + refreshing: + description: + - The number of instances in the managed instance group that are being reconfigured + with properties that do not require a restart or a recreate action. For + example, setting or removing target pools for the instance. + returned: success + type: int + restarting: + description: + - The number of instances in the managed instance group that are scheduled + to be restarted or are currently being restarted. + returned: success + type: int + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - A unique identifier for this resource. + returned: success + type: int + instanceGroup: + description: + - The instance group being managed. + returned: success + type: dict + instanceTemplate: + description: + - The instance template that is specified for this managed instance group. 
The + group uses this template to create all new instances in the managed instance + group. + returned: success + type: dict + name: + description: + - The name of the managed instance group. The name must be 1-63 characters long, + and comply with RFC1035. + returned: success + type: str + namedPorts: + description: + - Named ports configured for the Instance Groups complementary to this Instance + Group Manager. + returned: success + type: complex + contains: + name: + description: + - The name for this named port. The name must be 1-63 characters long, and + comply with RFC1035. + returned: success + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + returned: success + type: int + region: + description: + - The region this managed instance group resides (for regional resources). + returned: success + type: str + targetPools: + description: + - TargetPool resources to which instances in the instanceGroup field are added. + The target pools automatically apply to all of the instances in the managed + instance group. + returned: success + type: list + targetSize: + description: + - The target number of running instances for this managed instance group. Deleting + or abandoning instances reduces this number. Resizing the group changes this + number. + returned: success + type: int + zone: + description: + - The zone the managed instance group resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_info.py new file mode 100644 index 000000000..45ff87553 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_info.py @@ -0,0 +1,685 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = """
+---
+module: gcp_compute_instance_info
+description:
+- Gather info for GCP Instance
+short_description: Gather info for GCP Instance
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+ filters:
+ description:
+ - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/compute/docs/reference/rest/v1/instances/list)
+ - Each additional filter in the list will be added as an AND condition (filter1
+ and filter2).
+ type: list
+ elements: str
+ zone:
+ description:
+ - A reference to the zone where the machine resides.
+ required: true
+ type: str
+ project:
+ description:
+ - The Google Cloud Platform project to use.
+ type: str
+ auth_kind:
+ description:
+ - The type of credential used.
+ type: str
+ required: true
+ choices:
+ - application
+ - machineaccount
+ - serviceaccount
+ service_account_contents:
+ description:
+ - The contents of a Service Account JSON file, either in a dictionary or as a
+ JSON string that represents it.
+ type: jsonarg
+ service_account_file:
+ description:
+ - The path of a Service Account JSON file if serviceaccount is selected as type.
+ type: path
+ service_account_email:
+ description:
+ - An optional service account email address if machineaccount is selected and
+ the user does not wish to use the default email.
+ type: str
+ scopes:
+ description:
+ - Array of scopes to be used
+ type: list
+ elements: str
+ env_type:
+ description:
+ - Specifies which Ansible environment you're running this module within.
+ - This should not be set unless you know what you're doing.
+ - This only alters the User Agent string for any API requests.
+ type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+ env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+ env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+ env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variables values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+"""
+
+EXAMPLES = """
+- name: get info on an instance
+ gcp_compute_instance_info:
+ zone: us-central1-a
+ filters:
+ - name = test_object
+ project: test_project
+ auth_kind: serviceaccount
+ service_account_file: "/tmp/auth.pem"
+"""
+
+RETURN = """
+resources:
+ description: List of resources
+ returned: always
+ type: complex
+ contains:
+ canIpForward:
+ description:
+ - Allows this instance to send and receive packets with non-matching destination
+ or source IPs. This is required if you plan to use this instance to forward
+ routes.
+ returned: success
+ type: bool
+ cpuPlatform:
+ description:
+ - The CPU platform used by this instance.
+ returned: success
+ type: str
+ creationTimestamp:
+ description:
+ - Creation timestamp in RFC3339 text format.
+ returned: success + type: str + deletionProtection: + description: + - Whether the resource should be protected against deletion. + returned: success + type: bool + disks: + description: + - An array of disks that are associated with the instances that are created + from this template. + returned: success + type: complex + contains: + autoDelete: + description: + - Specifies whether the disk will be auto-deleted when the instance is deleted + (but not when the disk is detached from the instance). + - 'Tip: Disks should be set to autoDelete=true so that leftover disks are + not left behind on machine deletion.' + returned: success + type: bool + boot: + description: + - Indicates that this is a boot disk. The virtual machine will use the first + partition of the disk for its root filesystem. + returned: success + type: bool + deviceName: + description: + - Specifies a unique device name of your choice that is reflected into the + /dev/disk/by-id/google-* tree of a Linux operating system running within + the instance. This name can be used to reference the device for mounting, + resizing, and so on, from within the instance. + returned: success + type: str + diskEncryptionKey: + description: + - Encrypts or decrypts a disk using a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC + 4648 base64 to either encrypt or decrypt this resource. + returned: success + type: str + rsaEncryptedKey: + description: + - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied + encryption key to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. 
+ returned: success + type: str + index: + description: + - Assigns a zero-based index to this disk, where 0 is reserved for the boot + disk. For example, if you have many disks attached to an instance, each + disk would have a unique index number. If not specified, the server will + choose an appropriate value. + returned: success + type: int + initializeParams: + description: + - Specifies the parameters for a new disk that will be created alongside + the new instance. Use initialization parameters to create boot disks or + local SSDs attached to the new instance. + returned: success + type: complex + contains: + diskName: + description: + - Specifies the disk name. If not specified, the default is to use the + name of the instance. + returned: success + type: str + diskSizeGb: + description: + - Specifies the size of the disk in base-2 GB. + returned: success + type: int + diskType: + description: + - Reference to a disk type. + - Specifies the disk type to use to create the instance. + - If not specified, the default is pd-standard. + returned: success + type: str + sourceImage: + description: + - The source image to create this disk. When creating a new instance, + one of initializeParams.sourceImage or disks.source is required. To + create a disk with one of the public operating system images, specify + the image by its family name. + returned: success + type: str + sourceImageEncryptionKey: + description: + - The customer-supplied encryption key of the source image. Required + if the source image is protected by a customer-supplied encryption + key. + - Instance templates do not store customer-supplied encryption keys, + so you cannot create disks for instances in a managed instance group + if the source images are encrypted with your own keys. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded + in RFC 4648 base64 to either encrypt or decrypt this resource. 
+ returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + returned: success + type: str + interface: + description: + - Specifies the disk interface to use for attaching this disk, which is + either SCSI or NVME. The default is SCSI. + - Persistent disks must always use SCSI and the request will fail if you + attempt to attach a persistent disk in any other format than SCSI. + returned: success + type: str + mode: + description: + - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. + If not specified, the default is to attach the disk in READ_WRITE mode. + returned: success + type: str + source: + description: + - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage + or disks.source is required. + - If desired, you can also attach existing non-root persistent disks using + this property. This field is only applicable for persistent disks. + returned: success + type: dict + type: + description: + - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, + the default is PERSISTENT. + returned: success + type: str + guestAccelerators: + description: + - List of the type and count of accelerator cards attached to the instance . + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of the guest accelerator cards exposed to this instance. + returned: success + type: int + acceleratorType: + description: + - Full or partial URL of the accelerator type resource to expose to this + instance. + returned: success + type: str + hostname: + description: + - The hostname of the instance to be created. The specified hostname must be + RFC1035 compliant. 
If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal + when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal + when using zonal DNS. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str + labels: + description: + - Labels to apply to this instance. A list of key->value pairs. + returned: success + type: dict + metadata: + description: + - The metadata key/value pairs to assign to instances that are created from + this template. These pairs can consist of custom metadata or predefined keys. + returned: success + type: dict + machineType: + description: + - A reference to a machine type which defines VM kind. + returned: success + type: str + minCpuPlatform: + description: + - Specifies a minimum CPU platform for the VM instance. Applicable values are + the friendly names of CPU platforms . + returned: success + type: str + name: + description: + - The name of the resource, provided by the client when initially creating the + resource. The resource name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + networkInterfaces: + description: + - An array of configurations for this interface. This specifies how this interface + is configured to interact with other network services, such as connecting + to the internet. Only one network interface is supported per instance. 
+ returned: success + type: complex + contains: + accessConfigs: + description: + - An array of configurations for this interface. Currently, only one access + config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, + then this instance will have no external internet access. + returned: success + type: complex + contains: + name: + description: + - The name of this access configuration. The default and recommended + name is External NAT but you can use any arbitrary string you would + like. For example, My external IP or Network Access. + returned: success + type: str + natIP: + description: + - Reference to an address. + - An external IP address associated with this instance. + - Specify an unused static external IP address available to the project + or leave this field undefined to use an IP from a shared ephemeral + IP address pool. If you specify a static external IP address, it must + live in the same region as the zone of the instance. + returned: success + type: dict + type: + description: + - The type of configuration. The default and only option is ONE_TO_ONE_NAT. + returned: success + type: str + setPublicPtr: + description: + - Specifies whether a public DNS PTR record should be created to map + the external IP address of the instance to a DNS domain name. + returned: success + type: bool + publicPtrDomainName: + description: + - The DNS domain name for the public PTR record. You can set this field + only if the setPublicPtr field is enabled. + returned: success + type: str + networkTier: + description: + - This signifies the networking tier used for configuring this access + configuration. If an AccessConfig is specified without a valid external + IP address, an ephemeral IP will be created with this networkTier. + If an AccessConfig with a valid external IP address is specified, + it must match that of the networkTier associated with the Address + resource owning that IP. 
+ returned: success + type: str + aliasIpRanges: + description: + - An array of alias IP ranges for this network interface. Can only be specified + for network interfaces on subnet-mode networks. + returned: success + type: complex + contains: + ipCidrRange: + description: + - The IP CIDR range represented by this alias IP range. + - This IP CIDR range must belong to the specified subnetwork and cannot + contain IP addresses reserved by system or used by other network interfaces. + This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. + /24) or a CIDR format string (e.g. 10.1.2.0/24). + returned: success + type: str + subnetworkRangeName: + description: + - Optional subnetwork secondary range name specifying the secondary + range from which to allocate the IP CIDR range for this alias IP range. + If left unspecified, the primary range of the subnetwork will be used. + returned: success + type: str + name: + description: + - The name of the network interface, generated by the server. For network + devices, these are eth0, eth1, etc . + returned: success + type: str + network: + description: + - Specifies the title of an existing network. Not setting the network title + will select the default network interface, which could have SSH already + configured . + returned: success + type: dict + networkIP: + description: + - An IPv4 internal network address to assign to the instance for this network + interface. If not specified by the user, an unused internal IP is assigned + by the system. + returned: success + type: str + subnetwork: + description: + - Reference to a VPC network. + - If the network resource is in legacy mode, do not provide this property. + If the network is in auto subnet mode, providing the subnetwork is optional. + If the network is in custom subnet mode, then this field should be specified. + returned: success + type: dict + scheduling: + description: + - Sets the scheduling options for this instance. 
+ returned: success + type: complex + contains: + automaticRestart: + description: + - Specifies whether the instance should be automatically restarted if it + is terminated by Compute Engine (not terminated by a user). + - You can only set the automatic restart option for standard instances. + Preemptible instances cannot be automatically restarted. + returned: success + type: bool + onHostMaintenance: + description: + - Defines the maintenance behavior for this instance. For standard instances, + the default behavior is MIGRATE. For preemptible instances, the default + and only possible behavior is TERMINATE. + - For more information, see Setting Instance Scheduling Options. + returned: success + type: str + preemptible: + description: + - Defines whether the instance is preemptible. This can only be set during + instance creation, it cannot be set or changed after the instance has + been created. + returned: success + type: bool + serviceAccounts: + description: + - A list of service accounts, with their specified scopes, authorized for this + instance. Only one service account per VM instance is supported. + returned: success + type: complex + contains: + email: + description: + - Email address of the service account. + returned: success + type: str + scopes: + description: + - The list of scopes to be made available for this service account. + returned: success + type: list + shieldedInstanceConfig: + description: + - Configuration for various parameters related to shielded instances. + returned: success + type: complex + contains: + enableSecureBoot: + description: + - Defines whether the instance has Secure Boot enabled. + returned: success + type: bool + enableVtpm: + description: + - Defines whether the instance has the vTPM enabled. + returned: success + type: bool + enableIntegrityMonitoring: + description: + - Defines whether the instance has integrity monitoring enabled. 
+ returned: success + type: bool + confidentialInstanceConfig: + description: + - Configuration for confidential computing (requires setting the machine type + to any of the n2d-* types and a boot disk of type pd-ssd). + returned: success + type: complex + contains: + enableConfidentialCompute: + description: + - Enables confidential computing. + returned: success + type: bool + status: + description: + - 'The status of the instance. One of the following values: PROVISIONING, STAGING, + RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.' + - As a user, use RUNNING to keep a machine "on" and TERMINATED to turn a machine + off . + returned: success + type: str + statusMessage: + description: + - An optional, human-readable explanation of the status. + returned: success + type: str + tags: + description: + - A list of tags to apply to this instance. Tags are used to identify valid + sources or targets for network firewalls and are specified by the client during + instance creation. The tags can be later modified by the setTags method. Each + tag within the list must comply with RFC1035. + returned: success + type: complex + contains: + fingerprint: + description: + - Specifies a fingerprint for this request, which is essentially a hash + of the metadata's contents and used for optimistic locking. + - The fingerprint is initially generated by Compute Engine and changes after + every request to modify or update metadata. You must always provide an + up-to-date fingerprint hash in order to update or change metadata. + returned: success + type: str + items: + description: + - An array of tags. Each tag must be 1-63 characters long, and comply with + RFC1035. + returned: success + type: list + zone: + description: + - A reference to the zone where the machine resides. 
+ returned: success + type: str +""" + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, +) +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule( + argument_spec=dict( + filters=dict(type="list", elements="str"), + zone=dict(required=True, type="str"), + ) + ) + + if not module.params["scopes"]: + module.params["scopes"] = ["https://www.googleapis.com/auth/compute"] + + return_value = { + "resources": fetch_list( + module, collection(module), query_options(module.params["filters"]) + ) + } + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances".format( + **module.params + ) + + +def fetch_list(module, link, query): + auth = GcpSession(module, "compute") + return auth.list( + link, return_if_object, array_name="items", params={"filter": query} + ) + + +def query_options(filters): + if not filters: + return "" + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != "(" and f[-1] != ")": + queries.append("(%s)" % "".join(f)) + else: + queries.append(f) + + return " ".join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, "JSONDecodeError", ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ["error", "errors"]): + module.fail_json(msg=navigate_hash(result, ["error", "errors"])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_template.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_template.py new file mode 100644 index 000000000..078569263 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_template.py @@ -0,0 +1,1677 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_instance_template
+description:
+- Defines an Instance Template resource that provides configuration settings for your
+ virtual machine instances. Instance templates are not tied to the lifetime of an
+ instance and can be used and reused to deploy virtual machines. You can also
+ use different templates to create different virtual machine configurations. Instance
+ templates are required when you create a managed instance group.
+- 'Tip: Disks should be set to autoDelete=true so that leftover disks are not left
+ behind on machine deletion.'
+short_description: Creates a GCP InstanceTemplate
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in GCP
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ description:
+ description:
+ - An optional description of this resource. Provide this property when you create
+ the resource.
+ required: false
+ type: str
+ name:
+ description:
+ - Name of the resource. The name is 1-63 characters long and complies with RFC1035.
+ required: true
+ type: str
+ properties:
+ description:
+ - The instance properties for this instance template.
+ required: false + type: dict + suboptions: + can_ip_forward: + description: + - Enables instances created based on this template to send packets with source + IP addresses other than their own and receive packets with destination IP + addresses other than their own. If these instances will be used as an IP + gateway or it will be set as the next-hop in a Route resource, specify true. + If unsure, leave this set to false. + required: false + type: bool + description: + description: + - An optional text description for the instances that are created from this + instance template. + required: false + type: str + disks: + description: + - An array of disks that are associated with the instances that are created + from this template. + elements: dict + required: false + type: list + suboptions: + auto_delete: + description: + - Specifies whether the disk will be auto-deleted when the instance is + deleted (but not when the disk is detached from the instance). + - 'Tip: Disks should be set to autoDelete=true so that leftover disks + are not left behind on machine deletion.' + required: false + type: bool + boot: + description: + - Indicates that this is a boot disk. The virtual machine will use the + first partition of the disk for its root filesystem. + required: false + type: bool + device_name: + description: + - Specifies a unique device name of your choice that is reflected into + the /dev/disk/by-id/google-* tree of a Linux operating system running + within the instance. This name can be used to reference the device for + mounting, resizing, and so on, from within the instance. + required: false + type: str + disk_encryption_key: + description: + - Encrypts or decrypts a disk using a customer-supplied encryption key. + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. 
+ required: false + type: str + rsa_encrypted_key: + description: + - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied + encryption key to either encrypt or decrypt this resource. + required: false + type: str + index: + description: + - Assigns a zero-based index to this disk, where 0 is reserved for the + boot disk. For example, if you have many disks attached to an instance, + each disk would have a unique index number. If not specified, the server + will choose an appropriate value. + required: false + type: int + initialize_params: + description: + - Specifies the parameters for a new disk that will be created alongside + the new instance. Use initialization parameters to create boot disks + or local SSDs attached to the new instance. + required: false + type: dict + suboptions: + disk_name: + description: + - Specifies the disk name. If not specified, the default is to use + the name of the instance. + required: false + type: str + disk_size_gb: + description: + - Specifies the size of the disk in base-2 GB. + required: false + type: int + disk_type: + description: + - Reference to a disk type. + - Specifies the disk type to use to create the instance. + - If not specified, the default is pd-standard. + required: false + type: str + source_image: + description: + - The source image to create this disk. When creating a new instance, + one of initializeParams.sourceImage or disks.source is required. + To create a disk with one of the public operating system images, + specify the image by its family name. + required: false + type: str + source_image_encryption_key: + description: + - The customer-supplied encryption key of the source image. Required + if the source image is protected by a customer-supplied encryption + key. + - Instance templates do not store customer-supplied encryption keys, + so you cannot create disks for instances in a managed instance group + if the source images are encrypted with your own keys. 
+ required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded + in RFC 4648 base64 to either encrypt or decrypt this resource. + required: false + type: str + interface: + description: + - Specifies the disk interface to use for attaching this disk, which is + either SCSI or NVME. The default is SCSI. + - Persistent disks must always use SCSI and the request will fail if you + attempt to attach a persistent disk in any other format than SCSI. + - 'Some valid choices include: "SCSI", "NVME"' + required: false + type: str + mode: + description: + - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. + If not specified, the default is to attach the disk in READ_WRITE mode. + - 'Some valid choices include: "READ_WRITE", "READ_ONLY"' + required: false + type: str + source: + description: + - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage + or disks.source is required. + - If desired, you can also attach existing non-root persistent disks using + this property. This field is only applicable for persistent disks. + - Note that for InstanceTemplate, specify the disk name, not the URL for + the disk. + - 'This field represents a link to a Disk resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and + value of your resource''s name Alternatively, you can add `register: + name-of-resource` to a gcp_compute_disk task and then set this source + field to "{{ name-of-resource }}"' + required: false + type: dict + type: + description: + - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not + specified, the default is PERSISTENT. + - 'Some valid choices include: "SCRATCH", "PERSISTENT"' + required: false + type: str + labels: + description: + - Labels to apply to this address. A list of key->value pairs. 
+ required: false + type: dict + machine_type: + description: + - The machine type to use in the VM instance template. + required: true + type: str + min_cpu_platform: + description: + - Specifies a minimum CPU platform for the VM instance. Applicable values + are the friendly names of CPU platforms . + required: false + type: str + metadata: + description: + - The metadata key/value pairs to assign to instances that are created from + this template. These pairs can consist of custom metadata or predefined + keys. + required: false + type: dict + guest_accelerators: + description: + - List of the type and count of accelerator cards attached to the instance + . + elements: dict + required: false + type: list + suboptions: + accelerator_count: + description: + - The number of the guest accelerator cards exposed to this instance. + required: false + type: int + accelerator_type: + description: + - Full or partial URL of the accelerator type resource to expose to this + instance. + required: false + type: str + network_interfaces: + description: + - An array of configurations for this interface. This specifies how this interface + is configured to interact with other network services, such as connecting + to the internet. Only one network interface is supported per instance. + elements: dict + required: false + type: list + suboptions: + access_configs: + description: + - An array of configurations for this interface. Currently, only one access + config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs + specified, then this instance will have no external internet access. + elements: dict + required: false + type: list + suboptions: + name: + description: + - The name of this access configuration. The default and recommended + name is External NAT but you can use any arbitrary string you would + like. For example, My external IP or Network Access. + required: true + type: str + nat_ip: + description: + - Reference to an address. 
+ - An external IP address associated with this instance. + - Specify an unused static external IP address available to the project + or leave this field undefined to use an IP from a shared ephemeral + IP address pool. If you specify a static external IP address, it + must live in the same region as the zone of the instance. + - 'This field represents a link to a Address resource in GCP. It can + be specified in two ways. First, you can place a dictionary with + key ''address'' and value of your resource''s address Alternatively, + you can add `register: name-of-resource` to a gcp_compute_address + task and then set this nat_ip field to "{{ name-of-resource }}"' + required: false + type: dict + type: + description: + - The type of configuration. The default and only option is ONE_TO_ONE_NAT. + - 'Some valid choices include: "ONE_TO_ONE_NAT"' + required: true + type: str + set_public_ptr: + description: + - Specifies whether a public DNS PTR record should be created to map + the external IP address of the instance to a DNS domain name. + required: false + type: bool + public_ptr_domain_name: + description: + - The DNS domain name for the public PTR record. You can set this + field only if the setPublicPtr field is enabled. + required: false + type: str + network_tier: + description: + - This signifies the networking tier used for configuring this access + configuration. If an AccessConfig is specified without a valid external + IP address, an ephemeral IP will be created with this networkTier. + If an AccessConfig with a valid external IP address is specified, + it must match that of the networkTier associated with the Address + resource owning that IP. + - 'Some valid choices include: "PREMIUM", "STANDARD"' + required: false + type: str + alias_ip_ranges: + description: + - An array of alias IP ranges for this network interface. Can only be + specified for network interfaces on subnet-mode networks. 
+ elements: dict + required: false + type: list + suboptions: + ip_cidr_range: + description: + - The IP CIDR range represented by this alias IP range. + - This IP CIDR range must belong to the specified subnetwork and cannot + contain IP addresses reserved by system or used by other network + interfaces. This range may be a single IP address (e.g. 10.2.3.4), + a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24). + required: false + type: str + subnetwork_range_name: + description: + - Optional subnetwork secondary range name specifying the secondary + range from which to allocate the IP CIDR range for this alias IP + range. If left unspecified, the primary range of the subnetwork + will be used. + required: false + type: str + network: + description: + - Specifies the title of an existing network. When creating an instance, + if neither the network nor the subnetwork is specified, the default + network global/networks/default is used; if the network is not specified + but the subnetwork is specified, the network is inferred. + - 'This field represents a link to a Network resource in GCP. It can be + specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_network task and then set this network + field to "{{ name-of-resource }}"' + required: false + type: dict + network_ip: + description: + - An IPv4 internal network address to assign to the instance for this + network interface. If not specified by the user, an unused internal + IP is assigned by the system. + required: false + type: str + subnetwork: + description: + - Reference to a VPC network. + - If the network resource is in legacy mode, do not provide this property. + If the network is in auto subnet mode, providing the subnetwork is optional. + If the network is in custom subnet mode, then this field should be specified. 
+ - 'This field represents a link to a Subnetwork resource in GCP. It can + be specified in two ways. First, you can place a dictionary with key + ''selfLink'' and value of your resource''s selfLink Alternatively, you + can add `register: name-of-resource` to a gcp_compute_subnetwork task + and then set this subnetwork field to "{{ name-of-resource }}"' + required: false + type: dict + scheduling: + description: + - Sets the scheduling options for this instance. + required: false + type: dict + suboptions: + automatic_restart: + description: + - Specifies whether the instance should be automatically restarted if + it is terminated by Compute Engine (not terminated by a user). + - You can only set the automatic restart option for standard instances. + Preemptible instances cannot be automatically restarted. + required: false + type: bool + on_host_maintenance: + description: + - Defines the maintenance behavior for this instance. For standard instances, + the default behavior is MIGRATE. For preemptible instances, the default + and only possible behavior is TERMINATE. + - For more information, see Setting Instance Scheduling Options. + required: false + type: str + preemptible: + description: + - Defines whether the instance is preemptible. This can only be set during + instance creation, it cannot be set or changed after the instance has + been created. + required: false + type: bool + service_accounts: + description: + - A list of service accounts, with their specified scopes, authorized for + this instance. Only one service account per VM instance is supported. + elements: dict + required: false + type: list + suboptions: + email: + description: + - Email address of the service account. + required: false + type: str + scopes: + description: + - The list of scopes to be made available for this service account. + elements: str + required: false + type: list + tags: + description: + - A list of tags to apply to this instance. 
Tags are used to identify valid + sources or targets for network firewalls and are specified by the client + during instance creation. The tags can be later modified by the setTags + method. Each tag within the list must comply with RFC1035. + required: false + type: dict + suboptions: + fingerprint: + description: + - Specifies a fingerprint for this request, which is essentially a hash + of the metadata's contents and used for optimistic locking. + - The fingerprint is initially generated by Compute Engine and changes + after every request to modify or update metadata. You must always provide + an up-to-date fingerprint hash in order to update or change metadata. + required: false + type: str + items: + description: + - An array of tags. Each tag must be 1-63 characters long, and comply + with RFC1035. + elements: str + required: false + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-instancetemplate + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a address + google.cloud.gcp_compute_address: + name: address-instancetemplate + region: us-west1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: address + +- name: create a instance template + google.cloud.gcp_compute_instance_template: + name: test_object + properties: + disks: + - auto_delete: 'true' + boot: 'true' + initialize_params: + source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts + machine_type: n1-standard-1 + network_interfaces: + - network: "{{ network }}" + access_configs: + - name: test-config + type: ONE_TO_ONE_NAT + nat_ip: "{{ address }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: int +name: + description: + - Name of the resource. The name is 1-63 characters long and complies with RFC1035. + returned: success + type: str +properties: + description: + - The instance properties for this instance template. 
+ returned: success + type: complex + contains: + canIpForward: + description: + - Enables instances created based on this template to send packets with source + IP addresses other than their own and receive packets with destination IP + addresses other than their own. If these instances will be used as an IP gateway + or it will be set as the next-hop in a Route resource, specify true. If unsure, + leave this set to false. + returned: success + type: bool + description: + description: + - An optional text description for the instances that are created from this + instance template. + returned: success + type: str + disks: + description: + - An array of disks that are associated with the instances that are created + from this template. + returned: success + type: complex + contains: + licenses: + description: + - Any applicable license URI. + returned: success + type: list + autoDelete: + description: + - Specifies whether the disk will be auto-deleted when the instance is deleted + (but not when the disk is detached from the instance). + - 'Tip: Disks should be set to autoDelete=true so that leftover disks are + not left behind on machine deletion.' + returned: success + type: bool + boot: + description: + - Indicates that this is a boot disk. The virtual machine will use the first + partition of the disk for its root filesystem. + returned: success + type: bool + deviceName: + description: + - Specifies a unique device name of your choice that is reflected into the + /dev/disk/by-id/google-* tree of a Linux operating system running within + the instance. This name can be used to reference the device for mounting, + resizing, and so on, from within the instance. + returned: success + type: str + diskEncryptionKey: + description: + - Encrypts or decrypts a disk using a customer-supplied encryption key. 
+ returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC + 4648 base64 to either encrypt or decrypt this resource. + returned: success + type: str + rsaEncryptedKey: + description: + - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied + encryption key to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + returned: success + type: str + index: + description: + - Assigns a zero-based index to this disk, where 0 is reserved for the boot + disk. For example, if you have many disks attached to an instance, each + disk would have a unique index number. If not specified, the server will + choose an appropriate value. + returned: success + type: int + initializeParams: + description: + - Specifies the parameters for a new disk that will be created alongside + the new instance. Use initialization parameters to create boot disks or + local SSDs attached to the new instance. + returned: success + type: complex + contains: + diskName: + description: + - Specifies the disk name. If not specified, the default is to use the + name of the instance. + returned: success + type: str + diskSizeGb: + description: + - Specifies the size of the disk in base-2 GB. + returned: success + type: int + diskType: + description: + - Reference to a disk type. + - Specifies the disk type to use to create the instance. + - If not specified, the default is pd-standard. + returned: success + type: str + sourceImage: + description: + - The source image to create this disk. When creating a new instance, + one of initializeParams.sourceImage or disks.source is required. To + create a disk with one of the public operating system images, specify + the image by its family name. 
+ returned: success + type: str + sourceImageEncryptionKey: + description: + - The customer-supplied encryption key of the source image. Required + if the source image is protected by a customer-supplied encryption + key. + - Instance templates do not store customer-supplied encryption keys, + so you cannot create disks for instances in a managed instance group + if the source images are encrypted with your own keys. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded + in RFC 4648 base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + returned: success + type: str + interface: + description: + - Specifies the disk interface to use for attaching this disk, which is + either SCSI or NVME. The default is SCSI. + - Persistent disks must always use SCSI and the request will fail if you + attempt to attach a persistent disk in any other format than SCSI. + returned: success + type: str + mode: + description: + - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. + If not specified, the default is to attach the disk in READ_WRITE mode. + returned: success + type: str + source: + description: + - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage + or disks.source is required. + - If desired, you can also attach existing non-root persistent disks using + this property. This field is only applicable for persistent disks. + - Note that for InstanceTemplate, specify the disk name, not the URL for + the disk. + returned: success + type: dict + type: + description: + - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, + the default is PERSISTENT. + returned: success + type: str + labels: + description: + - Labels to apply to this address. 
A list of key->value pairs. + returned: success + type: dict + machineType: + description: + - The machine type to use in the VM instance template. + returned: success + type: str + minCpuPlatform: + description: + - Specifies a minimum CPU platform for the VM instance. Applicable values are + the friendly names of CPU platforms . + returned: success + type: str + metadata: + description: + - The metadata key/value pairs to assign to instances that are created from + this template. These pairs can consist of custom metadata or predefined keys. + returned: success + type: dict + guestAccelerators: + description: + - List of the type and count of accelerator cards attached to the instance . + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of the guest accelerator cards exposed to this instance. + returned: success + type: int + acceleratorType: + description: + - Full or partial URL of the accelerator type resource to expose to this + instance. + returned: success + type: str + networkInterfaces: + description: + - An array of configurations for this interface. This specifies how this interface + is configured to interact with other network services, such as connecting + to the internet. Only one network interface is supported per instance. + returned: success + type: complex + contains: + accessConfigs: + description: + - An array of configurations for this interface. Currently, only one access + config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, + then this instance will have no external internet access. + returned: success + type: complex + contains: + name: + description: + - The name of this access configuration. The default and recommended + name is External NAT but you can use any arbitrary string you would + like. For example, My external IP or Network Access. + returned: success + type: str + natIP: + description: + - Reference to an address. 
+ - An external IP address associated with this instance. + - Specify an unused static external IP address available to the project + or leave this field undefined to use an IP from a shared ephemeral + IP address pool. If you specify a static external IP address, it must + live in the same region as the zone of the instance. + returned: success + type: dict + type: + description: + - The type of configuration. The default and only option is ONE_TO_ONE_NAT. + returned: success + type: str + setPublicPtr: + description: + - Specifies whether a public DNS PTR record should be created to map + the external IP address of the instance to a DNS domain name. + returned: success + type: bool + publicPtrDomainName: + description: + - The DNS domain name for the public PTR record. You can set this field + only if the setPublicPtr field is enabled. + returned: success + type: str + networkTier: + description: + - This signifies the networking tier used for configuring this access + configuration. If an AccessConfig is specified without a valid external + IP address, an ephemeral IP will be created with this networkTier. + If an AccessConfig with a valid external IP address is specified, + it must match that of the networkTier associated with the Address + resource owning that IP. + returned: success + type: str + aliasIpRanges: + description: + - An array of alias IP ranges for this network interface. Can only be specified + for network interfaces on subnet-mode networks. + returned: success + type: complex + contains: + ipCidrRange: + description: + - The IP CIDR range represented by this alias IP range. + - This IP CIDR range must belong to the specified subnetwork and cannot + contain IP addresses reserved by system or used by other network interfaces. + This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. + /24) or a CIDR format string (e.g. 10.1.2.0/24). 
+ returned: success + type: str + subnetworkRangeName: + description: + - Optional subnetwork secondary range name specifying the secondary + range from which to allocate the IP CIDR range for this alias IP range. + If left unspecified, the primary range of the subnetwork will be used. + returned: success + type: str + name: + description: + - The name of the network interface, generated by the server. For network + devices, these are eth0, eth1, etc . + returned: success + type: str + network: + description: + - Specifies the title of an existing network. When creating an instance, + if neither the network nor the subnetwork is specified, the default network + global/networks/default is used; if the network is not specified but the + subnetwork is specified, the network is inferred. + returned: success + type: dict + networkIP: + description: + - An IPv4 internal network address to assign to the instance for this network + interface. If not specified by the user, an unused internal IP is assigned + by the system. + returned: success + type: str + subnetwork: + description: + - Reference to a VPC network. + - If the network resource is in legacy mode, do not provide this property. + If the network is in auto subnet mode, providing the subnetwork is optional. + If the network is in custom subnet mode, then this field should be specified. + returned: success + type: dict + scheduling: + description: + - Sets the scheduling options for this instance. + returned: success + type: complex + contains: + automaticRestart: + description: + - Specifies whether the instance should be automatically restarted if it + is terminated by Compute Engine (not terminated by a user). + - You can only set the automatic restart option for standard instances. + Preemptible instances cannot be automatically restarted. + returned: success + type: bool + onHostMaintenance: + description: + - Defines the maintenance behavior for this instance. 
For standard instances, + the default behavior is MIGRATE. For preemptible instances, the default + and only possible behavior is TERMINATE. + - For more information, see Setting Instance Scheduling Options. + returned: success + type: str + preemptible: + description: + - Defines whether the instance is preemptible. This can only be set during + instance creation, it cannot be set or changed after the instance has + been created. + returned: success + type: bool + serviceAccounts: + description: + - A list of service accounts, with their specified scopes, authorized for this + instance. Only one service account per VM instance is supported. + returned: success + type: complex + contains: + email: + description: + - Email address of the service account. + returned: success + type: str + scopes: + description: + - The list of scopes to be made available for this service account. + returned: success + type: list + tags: + description: + - A list of tags to apply to this instance. Tags are used to identify valid + sources or targets for network firewalls and are specified by the client during + instance creation. The tags can be later modified by the setTags method. Each + tag within the list must comply with RFC1035. + returned: success + type: complex + contains: + fingerprint: + description: + - Specifies a fingerprint for this request, which is essentially a hash + of the metadata's contents and used for optimistic locking. + - The fingerprint is initially generated by Compute Engine and changes after + every request to modify or update metadata. You must always provide an + up-to-date fingerprint hash in order to update or change metadata. + returned: success + type: str + items: + description: + - An array of tags. Each tag must be 1-63 characters long, and comply with + RFC1035. 
+ returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + properties=dict( + type='dict', + options=dict( + can_ip_forward=dict(type='bool'), + description=dict(type='str'), + disks=dict( + type='list', + elements='dict', + options=dict( + auto_delete=dict(type='bool'), + boot=dict(type='bool'), + device_name=dict(type='str'), + disk_encryption_key=dict(type='dict', no_log=True, options=dict(raw_key=dict(type='str'), rsa_encrypted_key=dict(type='str'))), + index=dict(type='int'), + initialize_params=dict( + type='dict', + options=dict( + disk_name=dict(type='str'), + disk_size_gb=dict(type='int'), + disk_type=dict(type='str'), + source_image=dict(type='str'), + source_image_encryption_key=dict(type='dict', no_log=True, options=dict(raw_key=dict(type='str'))), + ), + ), + interface=dict(type='str'), + mode=dict(type='str'), + source=dict(type='dict'), + type=dict(type='str'), + ), + ), + labels=dict(type='dict'), + machine_type=dict(required=True, type='str'), + min_cpu_platform=dict(type='str'), + metadata=dict(type='dict'), + guest_accelerators=dict(type='list', elements='dict', options=dict(accelerator_count=dict(type='int'), accelerator_type=dict(type='str'))), + 
network_interfaces=dict( + type='list', + elements='dict', + options=dict( + access_configs=dict( + type='list', + elements='dict', + options=dict( + name=dict(required=True, type='str'), + nat_ip=dict(type='dict'), + type=dict(required=True, type='str'), + set_public_ptr=dict(type='bool'), + public_ptr_domain_name=dict(type='str'), + network_tier=dict(type='str'), + ), + ), + alias_ip_ranges=dict( + type='list', elements='dict', options=dict(ip_cidr_range=dict(type='str'), subnetwork_range_name=dict(type='str')) + ), + network=dict(type='dict'), + network_ip=dict(type='str'), + subnetwork=dict(type='dict'), + ), + ), + scheduling=dict( + type='dict', options=dict(automatic_restart=dict(type='bool'), on_host_maintenance=dict(type='str'), preemptible=dict(type='bool')) + ), + service_accounts=dict(type='list', elements='dict', options=dict(email=dict(type='str'), scopes=dict(type='list', elements='str'))), + tags=dict(type='dict', options=dict(fingerprint=dict(type='str'), items=dict(type='list', elements='str'))), + ), + ), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#instanceTemplate' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), 
kind)
    create(module, collection(module), kind)


def delete(module, link, kind):
    # Delete the instance template at `link` and block until the operation finishes.
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))


def resource_to_request(module):
    # Build the API request body from the module parameters.
    # Keys with falsy values are stripped, except boolean False, which is a
    # legitimate API value and must be sent.
    request = {
        u'kind': 'compute#instanceTemplate',
        u'description': module.params.get('description'),
        u'name': module.params.get('name'),
        u'properties': InstanceTemplateProperties(module.params.get('properties', {}), module).to_request(),
    }
    request = encode_request(request, module)
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    # GET `link` and return the validated JSON body (None on 404 when allowed).
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    # Canonical URL of this instance template (global resource, no zone).
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/instanceTemplates/{name}".format(**module.params)


def collection(module):
    # URL of the project's instanceTemplates collection.
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/instanceTemplates".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    # Validate an HTTP response and return its decoded JSON body.
    # Fails the module on invalid JSON or when the body carries error.errors.
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        # json.decoder.JSONDecodeError does not exist on Python 2; fall back to ValueError.
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    result = decode_response(result, module)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    # True when the user's desired state differs from the live resource.
    # Only keys present on BOTH sides are compared, so output-only response
    # fields and unset request fields never cause a spurious diff.
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    request = decode_response(request, module)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    return {
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'description': response.get(u'description'),
        u'id': response.get(u'id'),
        u'name': response.get(u'name'),
        u'properties': InstanceTemplateProperties(response.get(u'properties', {}), module).from_response(),
    }


def disk_type_selflink(name, params):
    # Expand a bare disk-type name into a full self-link URL; values that
    # already look like a diskTypes URL are passed through unchanged.
    if name is None:
        return
    url = r"https://compute.googleapis.com/compute/v1/projects/.*/zones/.*/diskTypes/.*"
    if not re.match(url, name):
        # str.format fills project/zone first; the trailing %s keeps the raw
        # name out of format()'s reach (it may contain braces).
        name = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/diskTypes/%s".format(**params) % name
    return name


def async_op_url(module, extra_data=None):
    # URL of the global operation named by extra_data['op_id'].
    if extra_data is None:
        extra_data = {}
    url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    # Block until the operation in `response` completes, then fetch and
    # return the decoded target resource ({} when there is nothing to fetch).
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    response = fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instanceTemplate')
    if response:
        return decode_response(response, module)
    else:
        return {}


def wait_for_completion(status, op_result, module):
    # Poll the operation once per second until status is DONE, failing the
    # module as soon as the operation reports errors.
    # NOTE(review): there is no timeout here -- an operation that never
    # reaches DONE would loop forever; confirm upstream behavior.
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    # Fail the module if the operation payload carries errors at err_path.
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


def encode_request(request, module):
    # Convert user-style metadata (plain dict) into the API's items-list
    # layout before sending (see metadata_encoder below).
    if 'properties' in request and request['properties'] is not None and 'metadata' in request['properties'] and request['properties']['metadata'] is not None:
        request['properties']['metadata'] = metadata_encoder(request['properties']['metadata'])
    return request


def decode_response(response, module):
    # Inverse of encode_request: flatten API-style metadata back into a
    # plain key/value dict for comparison with module params.
    if (
        'properties' in response
        and response['properties'] is not None
        and 'metadata' in response['properties']
        and response['properties']['metadata'] is not None
    ):
        response['properties']['metadata'] = metadata_decoder(response['properties']['metadata'])
    return response


# TODO(alexstephen): Implement updating metadata on existing resources.
# Expose instance 'metadata' as a simple name/value pair hash. However the API
# defines metadata as a NestedObject with the following layout:
#
# metadata {
#   fingerprint: 'hash-of-last-metadata'
#   items: [
#     {
#       key: 'metadata1-key'
#       value: 'metadata1-value'
#     },
#     ...
#   ]
# }
#
def metadata_encoder(metadata):
    # Map a plain {key: value} dict to the API's {'items': [{key, value}, ...]} shape.
    metadata_new = []
    for key in metadata:
        value = metadata[key]
        metadata_new.append({"key": key, "value": value})
    return {'items': metadata_new}


# Map metadata.items[]{key:,value:} => metadata[key]=value
def metadata_decoder(metadata):
    # Inverse of metadata_encoder; tolerates a metadata dict with no 'items'.
    items = {}
    if 'items' in metadata:
        metadata_items = metadata['items']
        for item in metadata_items:
            items[item['key']] = item['value']
    return items


# Maps the 'properties' block between Ansible params (snake_case) and the
# API representation (camelCase). to_request() reads user params;
# from_response() reads an API payload.
class InstanceTemplateProperties(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'canIpForward': self.request.get('can_ip_forward'),
                u'description': self.request.get('description'),
                u'disks': InstanceTemplateDisksArray(self.request.get('disks', []), self.module).to_request(),
                u'labels': self.request.get('labels'),
                u'machineType': self.request.get('machine_type'),
                u'minCpuPlatform': self.request.get('min_cpu_platform'),
                u'metadata': self.request.get('metadata'),
                u'guestAccelerators': InstanceTemplateGuestacceleratorsArray(self.request.get('guest_accelerators', []), self.module).to_request(),
                u'networkInterfaces': InstanceTemplateNetworkinterfacesArray(self.request.get('network_interfaces', []), self.module).to_request(),
                u'scheduling': InstanceTemplateScheduling(self.request.get('scheduling', {}), self.module).to_request(),
                u'serviceAccounts': InstanceTemplateServiceaccountsArray(self.request.get('service_accounts', []), self.module).to_request(),
                u'tags': InstanceTemplateTags(self.request.get('tags', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'canIpForward': self.request.get(u'canIpForward'),
                u'description': self.request.get(u'description'),
                u'disks': InstanceTemplateDisksArray(self.request.get(u'disks', []), self.module).from_response(),
                u'labels': self.request.get(u'labels'),
                u'machineType': self.request.get(u'machineType'),
                u'minCpuPlatform': self.request.get(u'minCpuPlatform'),
                u'metadata': self.request.get(u'metadata'),
                u'guestAccelerators': InstanceTemplateGuestacceleratorsArray(self.request.get(u'guestAccelerators', []), self.module).from_response(),
                u'networkInterfaces': InstanceTemplateNetworkinterfacesArray(self.request.get(u'networkInterfaces', []), self.module).from_response(),
                u'scheduling': InstanceTemplateScheduling(self.request.get(u'scheduling', {}), self.module).from_response(),
                u'serviceAccounts': InstanceTemplateServiceaccountsArray(self.request.get(u'serviceAccounts', []), self.module).from_response(),
                u'tags': InstanceTemplateTags(self.request.get(u'tags', {}), self.module).from_response(),
            }
        )


# Maps each entry of the 'disks' list between module params and the API.
class InstanceTemplateDisksArray(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict(
            {
                u'autoDelete': item.get('auto_delete'),
                u'boot': item.get('boot'),
                u'deviceName': item.get('device_name'),
                u'diskEncryptionKey': InstanceTemplateDiskencryptionkey(item.get('disk_encryption_key', {}), self.module).to_request(),
                u'index': item.get('index'),
                u'initializeParams': InstanceTemplateInitializeparams(item.get('initialize_params', {}), self.module).to_request(),
                u'interface': item.get('interface'),
                u'mode': item.get('mode'),
                # 'source' may be given as a resource dict; reduce it to the disk name.
                u'source': replace_resource_dict(item.get(u'source', {}), 'name'),
                u'type': item.get('type'),
            }
        )

    def _response_from_item(self, item):
        return remove_nones_from_dict(
            {
                u'autoDelete': item.get(u'autoDelete'),
                u'boot': item.get(u'boot'),
                u'deviceName': item.get(u'deviceName'),
                u'diskEncryptionKey': InstanceTemplateDiskencryptionkey(item.get(u'diskEncryptionKey', {}), self.module).from_response(),
                u'index': item.get(u'index'),
                # NOTE(review): generated code reads initialize_params from the
                # module's own params (not from the API item) and re-encodes it
                # with to_request(); module.params has no top-level
                # 'initialize_params', so API-returned initializeParams are
                # effectively ignored during diffing. Presumably intentional in
                # the generator -- confirm before changing.
                u'initializeParams': InstanceTemplateInitializeparams(self.module.params.get('initialize_params', {}), self.module).to_request(),
                u'interface': item.get(u'interface'),
                u'mode': item.get(u'mode'),
                u'source': item.get(u'source'),
                u'type': item.get(u'type'),
            }
        )


# Maps a disk's customer-supplied encryption key block.
class InstanceTemplateDiskencryptionkey(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key'), u'rsaEncryptedKey': self.request.get('rsa_encrypted_key')})

    def from_response(self):
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey'), u'rsaEncryptedKey': self.request.get(u'rsaEncryptedKey')})


# Maps a disk's 'initialize_params' block (new-disk creation parameters).
class InstanceTemplateInitializeparams(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'diskName': self.request.get('disk_name'),
                u'diskSizeGb': self.request.get('disk_size_gb'),
                # Bare disk-type names are expanded to full self-links.
                u'diskType': disk_type_selflink(self.request.get('disk_type'), self.module.params),
                u'sourceImage': self.request.get('source_image'),
                u'sourceImageEncryptionKey': InstanceTemplateSourceimageencryptionkey(
                    self.request.get('source_image_encryption_key', {}), self.module
                ).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'diskName': self.request.get(u'diskName'),
                u'diskSizeGb': self.request.get(u'diskSizeGb'),
                u'diskType': self.request.get(u'diskType'),
                u'sourceImage': self.request.get(u'sourceImage'),
                u'sourceImageEncryptionKey': InstanceTemplateSourceimageencryptionkey(
                    self.request.get(u'sourceImageEncryptionKey', {}), self.module
                ).from_response(),
            }
        )


# Maps the source image's customer-supplied encryption key block.
class InstanceTemplateSourceimageencryptionkey(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})

    def from_response(self):
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})


# Maps each entry of the 'guest_accelerators' list (GPU type and count).
class InstanceTemplateGuestacceleratorsArray(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'acceleratorCount': item.get('accelerator_count'), u'acceleratorType': item.get('accelerator_type')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'acceleratorCount': item.get(u'acceleratorCount'), u'acceleratorType': item.get(u'acceleratorType')})


# Maps each entry of the 'network_interfaces' list.
class InstanceTemplateNetworkinterfacesArray(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict(
            {
                u'accessConfigs': InstanceTemplateAccessconfigsArray(item.get('access_configs', []), self.module).to_request(),
                u'aliasIpRanges': InstanceTemplateAliasiprangesArray(item.get('alias_ip_ranges', []), self.module).to_request(),
                # network/subnetwork may be given as resource dicts; reduce to their selfLink.
                u'network': replace_resource_dict(item.get(u'network', {}), 'selfLink'),
                u'networkIP': item.get('network_ip'),
                u'subnetwork': replace_resource_dict(item.get(u'subnetwork', {}), 'selfLink'),
            }
        )

    def _response_from_item(self, item):
        return remove_nones_from_dict(
            {
                u'accessConfigs': InstanceTemplateAccessconfigsArray(item.get(u'accessConfigs', []), self.module).from_response(),
                u'aliasIpRanges': InstanceTemplateAliasiprangesArray(item.get(u'aliasIpRanges', []), self.module).from_response(),
                u'network': item.get(u'network'),
                u'networkIP': item.get(u'networkIP'),
                u'subnetwork': item.get(u'subnetwork'),
            }
        )


# Maps each entry of a network interface's 'access_configs' list (NAT config).
class InstanceTemplateAccessconfigsArray(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict(
            {
                u'name': item.get('name'),
                # 'nat_ip' may be given as an Address resource dict; reduce to its address.
                u'natIP': replace_resource_dict(item.get(u'nat_ip', {}), 'address'),
                u'type': item.get('type'),
                u'setPublicPtr': item.get('set_public_ptr'),
                u'publicPtrDomainName': item.get('public_ptr_domain_name'),
                u'networkTier': item.get('network_tier'),
            }
        )

    def _response_from_item(self, item):
        return remove_nones_from_dict(
            {
                u'name': item.get(u'name'),
                u'natIP': item.get(u'natIP'),
                u'type': item.get(u'type'),
                u'setPublicPtr': item.get(u'setPublicPtr'),
                u'publicPtrDomainName': item.get(u'publicPtrDomainName'),
                u'networkTier': item.get(u'networkTier'),
            }
        )


# Maps each entry of a network interface's 'alias_ip_ranges' list.
class InstanceTemplateAliasiprangesArray(object):
    def __init__(self, request, module):
        self.module = module
        if request:
request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'ipCidrRange': item.get('ip_cidr_range'), u'subnetworkRangeName': item.get('subnetwork_range_name')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'ipCidrRange': item.get(u'ipCidrRange'), u'subnetworkRangeName': item.get(u'subnetworkRangeName')}) + + +class InstanceTemplateScheduling(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'automaticRestart': self.request.get('automatic_restart'), + u'onHostMaintenance': self.request.get('on_host_maintenance'), + u'preemptible': self.request.get('preemptible'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'automaticRestart': self.request.get(u'automaticRestart'), + u'onHostMaintenance': self.request.get(u'onHostMaintenance'), + u'preemptible': self.request.get(u'preemptible'), + } + ) + + +class InstanceTemplateServiceaccountsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'email': item.get('email'), u'scopes': item.get('scopes')}) + + def _response_from_item(self, item): + return 
remove_nones_from_dict({u'email': item.get(u'email'), u'scopes': item.get(u'scopes')}) + + +class InstanceTemplateTags(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'fingerprint': self.request.get('fingerprint'), u'items': self.request.get('items')}) + + def from_response(self): + return remove_nones_from_dict({u'fingerprint': self.request.get(u'fingerprint'), u'items': self.request.get(u'items')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_template_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_template_info.py new file mode 100644 index 000000000..b08cdfaab --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_instance_template_info.py @@ -0,0 +1,614 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_instance_template_info +description: +- Gather info for GCP InstanceTemplate +short_description: Gather info for GCP InstanceTemplate +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance template + gcp_compute_instance_template_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. The name is 1-63 characters long and complies with RFC1035. 
+ returned: success + type: str + properties: + description: + - The instance properties for this instance template. + returned: success + type: complex + contains: + canIpForward: + description: + - Enables instances created based on this template to send packets with + source IP addresses other than their own and receive packets with destination + IP addresses other than their own. If these instances will be used as + an IP gateway or it will be set as the next-hop in a Route resource, specify + true. If unsure, leave this set to false. + returned: success + type: bool + description: + description: + - An optional text description for the instances that are created from this + instance template. + returned: success + type: str + disks: + description: + - An array of disks that are associated with the instances that are created + from this template. + returned: success + type: complex + contains: + licenses: + description: + - Any applicable license URI. + returned: success + type: list + autoDelete: + description: + - Specifies whether the disk will be auto-deleted when the instance + is deleted (but not when the disk is detached from the instance). + - 'Tip: Disks should be set to autoDelete=true so that leftover disks + are not left behind on machine deletion.' + returned: success + type: bool + boot: + description: + - Indicates that this is a boot disk. The virtual machine will use the + first partition of the disk for its root filesystem. + returned: success + type: bool + deviceName: + description: + - Specifies a unique device name of your choice that is reflected into + the /dev/disk/by-id/google-* tree of a Linux operating system running + within the instance. This name can be used to reference the device + for mounting, resizing, and so on, from within the instance. + returned: success + type: str + diskEncryptionKey: + description: + - Encrypts or decrypts a disk using a customer-supplied encryption key. 
+ returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded + in RFC 4648 base64 to either encrypt or decrypt this resource. + returned: success + type: str + rsaEncryptedKey: + description: + - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied + encryption key to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + returned: success + type: str + index: + description: + - Assigns a zero-based index to this disk, where 0 is reserved for the + boot disk. For example, if you have many disks attached to an instance, + each disk would have a unique index number. If not specified, the + server will choose an appropriate value. + returned: success + type: int + initializeParams: + description: + - Specifies the parameters for a new disk that will be created alongside + the new instance. Use initialization parameters to create boot disks + or local SSDs attached to the new instance. + returned: success + type: complex + contains: + diskName: + description: + - Specifies the disk name. If not specified, the default is to use + the name of the instance. + returned: success + type: str + diskSizeGb: + description: + - Specifies the size of the disk in base-2 GB. + returned: success + type: int + diskType: + description: + - Reference to a disk type. + - Specifies the disk type to use to create the instance. + - If not specified, the default is pd-standard. + returned: success + type: str + sourceImage: + description: + - The source image to create this disk. When creating a new instance, + one of initializeParams.sourceImage or disks.source is required. + To create a disk with one of the public operating system images, + specify the image by its family name. 
+ returned: success + type: str + sourceImageEncryptionKey: + description: + - The customer-supplied encryption key of the source image. Required + if the source image is protected by a customer-supplied encryption + key. + - Instance templates do not store customer-supplied encryption keys, + so you cannot create disks for instances in a managed instance + group if the source images are encrypted with your own keys. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded + in RFC 4648 base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + returned: success + type: str + interface: + description: + - Specifies the disk interface to use for attaching this disk, which + is either SCSI or NVME. The default is SCSI. + - Persistent disks must always use SCSI and the request will fail if + you attempt to attach a persistent disk in any other format than SCSI. + returned: success + type: str + mode: + description: + - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. + If not specified, the default is to attach the disk in READ_WRITE + mode. + returned: success + type: str + source: + description: + - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage + or disks.source is required. + - If desired, you can also attach existing non-root persistent disks + using this property. This field is only applicable for persistent + disks. + - Note that for InstanceTemplate, specify the disk name, not the URL + for the disk. + returned: success + type: dict + type: + description: + - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not + specified, the default is PERSISTENT. 
+ returned: success + type: str + labels: + description: + - Labels to apply to this address. A list of key->value pairs. + returned: success + type: dict + machineType: + description: + - The machine type to use in the VM instance template. + returned: success + type: str + minCpuPlatform: + description: + - Specifies a minimum CPU platform for the VM instance. Applicable values + are the friendly names of CPU platforms . + returned: success + type: str + metadata: + description: + - The metadata key/value pairs to assign to instances that are created from + this template. These pairs can consist of custom metadata or predefined + keys. + returned: success + type: dict + guestAccelerators: + description: + - List of the type and count of accelerator cards attached to the instance + . + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of the guest accelerator cards exposed to this instance. + returned: success + type: int + acceleratorType: + description: + - Full or partial URL of the accelerator type resource to expose to + this instance. + returned: success + type: str + networkInterfaces: + description: + - An array of configurations for this interface. This specifies how this + interface is configured to interact with other network services, such + as connecting to the internet. Only one network interface is supported + per instance. + returned: success + type: complex + contains: + accessConfigs: + description: + - An array of configurations for this interface. Currently, only one + access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs + specified, then this instance will have no external internet access. + returned: success + type: complex + contains: + name: + description: + - The name of this access configuration. The default and recommended + name is External NAT but you can use any arbitrary string you + would like. For example, My external IP or Network Access. 
+ returned: success + type: str + natIP: + description: + - Reference to an address. + - An external IP address associated with this instance. + - Specify an unused static external IP address available to the + project or leave this field undefined to use an IP from a shared + ephemeral IP address pool. If you specify a static external IP + address, it must live in the same region as the zone of the instance. + returned: success + type: dict + type: + description: + - The type of configuration. The default and only option is ONE_TO_ONE_NAT. + returned: success + type: str + setPublicPtr: + description: + - Specifies whether a public DNS PTR record should be created to + map the external IP address of the instance to a DNS domain name. + returned: success + type: bool + publicPtrDomainName: + description: + - The DNS domain name for the public PTR record. You can set this + field only if the setPublicPtr field is enabled. + returned: success + type: str + networkTier: + description: + - This signifies the networking tier used for configuring this access + configuration. If an AccessConfig is specified without a valid + external IP address, an ephemeral IP will be created with this + networkTier. If an AccessConfig with a valid external IP address + is specified, it must match that of the networkTier associated + with the Address resource owning that IP. + returned: success + type: str + aliasIpRanges: + description: + - An array of alias IP ranges for this network interface. Can only be + specified for network interfaces on subnet-mode networks. + returned: success + type: complex + contains: + ipCidrRange: + description: + - The IP CIDR range represented by this alias IP range. + - This IP CIDR range must belong to the specified subnetwork and + cannot contain IP addresses reserved by system or used by other + network interfaces. This range may be a single IP address (e.g. + 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. + 10.1.2.0/24). 
+ returned: success + type: str + subnetworkRangeName: + description: + - Optional subnetwork secondary range name specifying the secondary + range from which to allocate the IP CIDR range for this alias + IP range. If left unspecified, the primary range of the subnetwork + will be used. + returned: success + type: str + name: + description: + - The name of the network interface, generated by the server. For network + devices, these are eth0, eth1, etc . + returned: success + type: str + network: + description: + - Specifies the title of an existing network. When creating an instance, + if neither the network nor the subnetwork is specified, the default + network global/networks/default is used; if the network is not specified + but the subnetwork is specified, the network is inferred. + returned: success + type: dict + networkIP: + description: + - An IPv4 internal network address to assign to the instance for this + network interface. If not specified by the user, an unused internal + IP is assigned by the system. + returned: success + type: str + subnetwork: + description: + - Reference to a VPC network. + - If the network resource is in legacy mode, do not provide this property. + If the network is in auto subnet mode, providing the subnetwork is + optional. If the network is in custom subnet mode, then this field + should be specified. + returned: success + type: dict + scheduling: + description: + - Sets the scheduling options for this instance. + returned: success + type: complex + contains: + automaticRestart: + description: + - Specifies whether the instance should be automatically restarted if + it is terminated by Compute Engine (not terminated by a user). + - You can only set the automatic restart option for standard instances. + Preemptible instances cannot be automatically restarted. + returned: success + type: bool + onHostMaintenance: + description: + - Defines the maintenance behavior for this instance. 
For standard instances, + the default behavior is MIGRATE. For preemptible instances, the default + and only possible behavior is TERMINATE. + - For more information, see Setting Instance Scheduling Options. + returned: success + type: str + preemptible: + description: + - Defines whether the instance is preemptible. This can only be set + during instance creation, it cannot be set or changed after the instance + has been created. + returned: success + type: bool + serviceAccounts: + description: + - A list of service accounts, with their specified scopes, authorized for + this instance. Only one service account per VM instance is supported. + returned: success + type: complex + contains: + email: + description: + - Email address of the service account. + returned: success + type: str + scopes: + description: + - The list of scopes to be made available for this service account. + returned: success + type: list + tags: + description: + - A list of tags to apply to this instance. Tags are used to identify valid + sources or targets for network firewalls and are specified by the client + during instance creation. The tags can be later modified by the setTags + method. Each tag within the list must comply with RFC1035. + returned: success + type: complex + contains: + fingerprint: + description: + - Specifies a fingerprint for this request, which is essentially a hash + of the metadata's contents and used for optimistic locking. + - The fingerprint is initially generated by Compute Engine and changes + after every request to modify or update metadata. You must always + provide an up-to-date fingerprint hash in order to update or change + metadata. + returned: success + type: str + items: + description: + - An array of tags. Each tag must be 1-63 characters long, and comply + with RFC1035. 
################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json

################################################################################
# Main
################################################################################


def main():
    """Entry point: list matching resources and exit with them under 'resources'."""
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))

    # Default to the Compute scope when the caller did not supply any.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
    module.exit_json(**return_value)


def collection(module):
    # Global (non-regional) collection URL; {project} is filled from module params.
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/instanceTemplates".format(**module.params)


def fetch_list(module, link, query):
    """Return every item in the collection, letting GcpSession follow pagination."""
    auth = GcpSession(module, 'compute')
    return auth.list(link, return_if_object, array_name='items', params={'filter': query})


def query_options(filters):
    """Combine the user-supplied filter list into one API filter expression.

    A single filter is passed through untouched.  Multiple filters are
    space-joined (the API treats that as AND); each one is parenthesized
    unless it is already wrapped at both ends.
    """
    if not filters:
        return ''

    if len(filters) == 1:
        return filters[0]

    queries = []
    for f in filters:
        # For multiple queries, all queries should have ().
        # BUGFIX: the original used `and`, which left half-parenthesized
        # filters such as "(a=b" or "a=b)" unwrapped; wrap unless BOTH
        # ends are already parenthesized.  Also dropped the no-op
        # ''.join(f) (joining a string's characters returns the string).
        if f[0] != '(' or f[-1] != ')':
            queries.append("(%s)" % f)
        else:
            queries.append(f)

    return ' '.join(queries)


def return_if_object(module, response):
    """Decode *response* JSON, returning None for 404/204 and failing on API errors."""
    # If not found, return nothing.
    if response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    # Surface structured API errors as a module failure.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


if __name__ == "__main__":
    main()
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_interconnect_attachment +description: +- Represents an InterconnectAttachment (VLAN attachment) resource. For more information, + see Creating VLAN Attachments. +short_description: Creates a GCP InterconnectAttachment +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + admin_enabled: + description: + - Whether the VLAN attachment is enabled or disabled. When using PARTNER type + this will Pre-Activate the interconnect attachment . + required: false + default: 'true' + type: bool + interconnect: + description: + - URL of the underlying Interconnect object that this attachment's traffic will + traverse through. Required if type is DEDICATED, must not be set if type is + PARTNER. + required: false + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + mtu: + description: + - Maximum Transmission Unit (MTU), in bytes, of packets passing through this interconnect + attachment. Currently, only 1440 and 1500 are allowed. If not specified, the + value will default to 1440. 
+ required: false + type: str + bandwidth: + description: + - Provisioned bandwidth capacity for the interconnect attachment. + - For attachments of type DEDICATED, the user can set the bandwidth. + - For attachments of type PARTNER, the Google Partner that is operating the interconnect + must set the bandwidth. + - Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, Defaults + to BPS_10G . + - 'Some valid choices include: "BPS_50M", "BPS_100M", "BPS_200M", "BPS_300M", + "BPS_400M", "BPS_500M", "BPS_1G", "BPS_2G", "BPS_5G", "BPS_10G", "BPS_20G", + "BPS_50G"' + required: false + type: str + edge_availability_domain: + description: + - Desired availability domain for the attachment. Only available for type PARTNER, + at creation time. For improved reliability, customers should configure a pair + of attachments with one per availability domain. The selected availability domain + will be provided to the Partner via the pairing key so that the provisioned + circuit will lie in the specified domain. If not specified, the value will default + to AVAILABILITY_DOMAIN_ANY. + required: false + type: str + type: + description: + - The type of InterconnectAttachment you wish to create. Defaults to DEDICATED. + - 'Some valid choices include: "DEDICATED", "PARTNER", "PARTNER_PROVIDER"' + required: false + type: str + router: + description: + - URL of the cloud router to be used for dynamic routing. This router must be + in the same region as this InterconnectAttachment. The InterconnectAttachment + will automatically connect the Interconnect to the network & region within which + the Cloud Router is configured. + - 'This field represents a link to a Router resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_router task and then set this router field to "{{ name-of-resource + }}"' + required: true + type: dict + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + candidate_subnets: + description: + - Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress + and customerRouterIpAddress for this attachment. + - All prefixes must be within link-local address space (169.254.0.0/16) and must + be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 + from the supplied candidate prefix(es). The request will fail if all possible + /29s are in use on Google's edge. If not supplied, Google will randomly select + an unused /29 from all of link-local space. + elements: str + required: false + type: list + vlan_tag8021q: + description: + - The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. When using + PARTNER type this will be managed upstream. + required: false + type: int + ipsec_internal_addresses: + description: + - URL of addresses that have been reserved for the interconnect attachment, Used + only for interconnect attachment that has the encryption option as IPSEC. + - The addresses must be RFC 1918 IP address ranges. 
When creating HA VPN gateway + over the interconnect attachment, if the attachment is configured to use an + RFC 1918 IP address, then the VPN gateway's IP address will be allocated from + the IP address range specified here. + - For example, if the HA VPN gateway's interface 0 is paired to this interconnect + attachment, then an RFC 1918 IP address for the VPN gateway interface 0 will + be allocated from the IP address specified for this interconnect attachment. + - If this field is not specified for interconnect attachment that has encryption + option as IPSEC, later on when creating HA VPN gateway on this interconnect + attachment, the HA VPN gateway's IP address will be allocated from regional + external IP address pool. + elements: dict + required: false + type: list + encryption: + description: + - 'Indicates the user-supplied encryption option of this interconnect attachment: + NONE is the default value, which means that the attachment carries unencrypted + traffic. VMs can send traffic to, or receive traffic from, this type of attachment.' + - IPSEC indicates that the attachment carries only traffic encrypted by an IPsec + device such as an HA VPN gateway. VMs cannot directly send traffic to, or receive + traffic from, such an attachment. To use IPsec-encrypted Cloud Interconnect + create the attachment using this option. + - Not currently available publicly. + - 'Some valid choices include: "NONE", "IPSEC"' + required: false + default: NONE + type: str + region: + description: + - Region where the regional interconnect attachment resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a interconnect attachment + google.cloud.gcp_compute_interconnect_attachment: + name: test_object + region: us-central1 + project: test_project + auth_kind: serviceaccount + interconnect: https://googleapis.com/compute/v1/projects/test_project/global/interconnects/... + router: https://googleapis.com/compute/v1/projects/test_project/regions/us-central1/routers/... + service_account_file: "/tmp/auth.pem" + state: present + register: disk +''' + +RETURN = ''' +adminEnabled: + description: + - Whether the VLAN attachment is enabled or disabled. When using PARTNER type this + will Pre-Activate the interconnect attachment . + returned: success + type: bool +cloudRouterIpAddress: + description: + - IPv4 address + prefix length to be configured on Cloud Router Interface for this + interconnect attachment. + returned: success + type: str +customerRouterIpAddress: + description: + - IPv4 address + prefix length to be configured on the customer router subinterface + for this interconnect attachment. + returned: success + type: str +interconnect: + description: + - URL of the underlying Interconnect object that this attachment's traffic will + traverse through. Required if type is DEDICATED, must not be set if type is PARTNER. 
+ returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +mtu: + description: + - Maximum Transmission Unit (MTU), in bytes, of packets passing through this interconnect + attachment. Currently, only 1440 and 1500 are allowed. If not specified, the value + will default to 1440. + returned: success + type: str +bandwidth: + description: + - Provisioned bandwidth capacity for the interconnect attachment. + - For attachments of type DEDICATED, the user can set the bandwidth. + - For attachments of type PARTNER, the Google Partner that is operating the interconnect + must set the bandwidth. + - Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, Defaults + to BPS_10G . + returned: success + type: str +edgeAvailabilityDomain: + description: + - Desired availability domain for the attachment. Only available for type PARTNER, + at creation time. For improved reliability, customers should configure a pair + of attachments with one per availability domain. The selected availability domain + will be provided to the Partner via the pairing key so that the provisioned circuit + will lie in the specified domain. If not specified, the value will default to + AVAILABILITY_DOMAIN_ANY. + returned: success + type: str +pairingKey: + description: + - '[Output only for type PARTNER. Not present for DEDICATED]. The opaque identifier + of an PARTNER attachment used to initiate provisioning with a selected partner. + Of the form "XXXXX/region/domain" .' + returned: success + type: str +partnerAsn: + description: + - "[Output only for type PARTNER. Not present for DEDICATED]. Optional BGP ASN for + the router that should be supplied by a layer 3 Partner if they configured BGP + on behalf of the customer." + returned: success + type: str +privateInterconnectInfo: + description: + - Information specific to an InterconnectAttachment. 
This property is populated + if the interconnect that this is attached to is of type DEDICATED. + returned: success + type: complex + contains: + tag8021q: + description: + - 802.1q encapsulation tag to be used for traffic between Google and the customer, + going to and from this network and region. + returned: success + type: int +type: + description: + - The type of InterconnectAttachment you wish to create. Defaults to DEDICATED. + returned: success + type: str +state: + description: + - "[Output Only] The current state of this attachment's functionality." + returned: success + type: str +googleReferenceId: + description: + - Google reference ID, to be used when raising support tickets with Google or otherwise + to debug backend connectivity issues. + returned: success + type: str +router: + description: + - URL of the cloud router to be used for dynamic routing. This router must be in + the same region as this InterconnectAttachment. The InterconnectAttachment will + automatically connect the Interconnect to the network & region within which the + Cloud Router is configured. + returned: success + type: dict +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: str +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. 
+ returned: success + type: str +candidateSubnets: + description: + - Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress + and customerRouterIpAddress for this attachment. + - All prefixes must be within link-local address space (169.254.0.0/16) and must + be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 + from the supplied candidate prefix(es). The request will fail if all possible + /29s are in use on Google's edge. If not supplied, Google will randomly select + an unused /29 from all of link-local space. + returned: success + type: list +vlanTag8021q: + description: + - The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. When using + PARTNER type this will be managed upstream. + returned: success + type: int +ipsecInternalAddresses: + description: + - URL of addresses that have been reserved for the interconnect attachment, Used + only for interconnect attachment that has the encryption option as IPSEC. + - The addresses must be RFC 1918 IP address ranges. When creating HA VPN gateway + over the interconnect attachment, if the attachment is configured to use an RFC + 1918 IP address, then the VPN gateway's IP address will be allocated from the + IP address range specified here. + - For example, if the HA VPN gateway's interface 0 is paired to this interconnect + attachment, then an RFC 1918 IP address for the VPN gateway interface 0 will be + allocated from the IP address specified for this interconnect attachment. + - If this field is not specified for interconnect attachment that has encryption + option as IPSEC, later on when creating HA VPN gateway on this interconnect attachment, + the HA VPN gateway's IP address will be allocated from regional external IP address + pool. 
################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
    navigate_hash,
    GcpSession,
    GcpModule,
    GcpRequest,
    remove_nones_from_dict,
    replace_resource_dict,
)
import json
import re
import time

################################################################################
# Main
################################################################################


def main():
    """Main function: reconcile the InterconnectAttachment to the desired state."""

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            admin_enabled=dict(default=True, type='bool'),
            interconnect=dict(type='str'),
            description=dict(type='str'),
            mtu=dict(type='str'),
            bandwidth=dict(type='str'),
            edge_availability_domain=dict(type='str'),
            type=dict(type='str'),
            router=dict(required=True, type='dict'),
            name=dict(required=True, type='str'),
            candidate_subnets=dict(type='list', elements='str'),
            vlan_tag8021q=dict(type='int'),
            ipsec_internal_addresses=dict(type='list', elements='dict'),
            encryption=dict(default='NONE', type='str'),
            region=dict(required=True, type='str'),
        )
    )

    # Default to the Compute scope when the caller did not supply any.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#interconnectAttachment'

    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    # ROBUSTNESS: fetch_resource() returns None on a 404 (e.g. the re-read
    # after an update racing a concurrent delete); guard so fetch.update()
    # below cannot raise AttributeError on None.
    if fetch is None:
        fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link, kind):
    """POST the resource and block until the returned operation completes."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


def update(module, link, kind):
    """PATCH the resource and block until the returned operation completes."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.patch(link, resource_to_request(module)))


def delete(module, link, kind):
    """DELETE the resource and block until the returned operation completes."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))


def resource_to_request(module):
    """Build the API request body from module params, dropping empty values.

    Booleans that are False are deliberately kept (the `v is False` clause)
    so admin_enabled=False is still sent to the API.
    """
    request = {
        u'kind': 'compute#interconnectAttachment',
        u'adminEnabled': module.params.get('admin_enabled'),
        u'interconnect': module.params.get('interconnect'),
        u'description': module.params.get('description'),
        u'mtu': module.params.get('mtu'),
        u'bandwidth': module.params.get('bandwidth'),
        u'edgeAvailabilityDomain': module.params.get('edge_availability_domain'),
        u'type': module.params.get('type'),
        u'router': replace_resource_dict(module.params.get(u'router', {}), 'selfLink'),
        u'name': module.params.get('name'),
        u'candidateSubnets': module.params.get('candidate_subnets'),
        u'vlanTag8021q': module.params.get('vlan_tag8021q'),
        u'ipsecInternalAddresses': replace_resource_dict(module.params.get('ipsec_internal_addresses', []), 'selfLink'),
        u'encryption': module.params.get('encryption'),
    }
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource at *link*; returns None for 404 when allow_not_found."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    # Canonical URL of this specific attachment.
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/interconnectAttachments/{name}".format(**module.params)


def collection(module):
    # Regional collection URL used for inserts.
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/interconnectAttachments".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode *response* JSON; None for tolerated 404 or 204, fail on API errors."""
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True when the desired request differs from the live resource.

    Only keys present on BOTH sides are compared, so output-only fields
    never trigger a spurious update.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Project the live API response onto the diffable request-shaped dict.

    Immutable/identifying fields (interconnect, type, router, name, ...) are
    echoed from the module params rather than the response so they never
    register as drift.
    """
    nested = InterconnectAttachmentPrivateinterconnectinfo(response.get('privateInterconnectInfo', {}), module)
    return {
        'adminEnabled': response.get('adminEnabled'),
        'cloudRouterIpAddress': response.get('cloudRouterIpAddress'),
        'customerRouterIpAddress': response.get('customerRouterIpAddress'),
        'interconnect': module.params.get('interconnect'),
        'description': response.get('description'),
        'mtu': response.get('mtu'),
        'bandwidth': response.get('bandwidth'),
        'edgeAvailabilityDomain': module.params.get('edge_availability_domain'),
        'pairingKey': response.get('pairingKey'),
        'partnerAsn': response.get('partnerAsn'),
        'privateInterconnectInfo': nested.from_response(),
        'type': module.params.get('type'),
        'state': response.get('state'),
        'googleReferenceId': response.get('googleReferenceId'),
        'router': replace_resource_dict(module.params.get('router', {}), 'selfLink'),
        'creationTimestamp': response.get('creationTimestamp'),
        'id': response.get('id'),
        'name': module.params.get('name'),
        'candidateSubnets': module.params.get('candidate_subnets'),
        'vlanTag8021q': module.params.get('vlan_tag8021q'),
        'ipsecInternalAddresses': replace_resource_dict(module.params.get('ipsec_internal_addresses', []), 'selfLink'),
        'encryption': module.params.get('encryption'),
    }


def region_selflink(name, params):
    """Expand a bare region name into its full compute selfLink URL.

    Passes through None and anything that already looks like a full
    regional selfLink.
    """
    if name is None:
        return None
    pattern = r"https://compute.googleapis.com/compute/v1/projects/.*/regions/.*"
    if re.match(pattern, name):
        return name
    base = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params)
    return base % name


def async_op_url(module, extra_data=None):
    """Build the polling URL for a regional operation; extra_data supplies op_id."""
    template = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
    values = dict(extra_data or {})
    values.update(module.params)
    return template.format(**values)


def wait_for_operation(module, response):
    """Block on the operation in *response*, then return the finished resource."""
    operation = return_if_object(module, response, 'compute#operation')
    if operation is None:
        return {}
    status = navigate_hash(operation, ['status'])
    completed = wait_for_completion(status, operation, module)
    return fetch_resource(module, navigate_hash(completed, ['targetLink']), 'compute#interconnectAttachment')


def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until its status reaches DONE."""
    operation_id = navigate_hash(op_result, ['name'])
    poll_uri = async_op_url(module, {'op_id': operation_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, poll_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module when the operation carries errors at *err_path*."""
    errors = navigate_hash(response, err_path)
    if errors is None:
        return
    module.fail_json(msg=errors)


class InterconnectAttachmentPrivateinterconnectinfo(object):
    """Wrapper for the (currently empty) privateInterconnectInfo sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # No writable sub-fields are generated for this object.
        return remove_nones_from_dict({})

    def from_response(self):
        return remove_nones_from_dict({})


if __name__ == '__main__':
    main()
+# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_interconnect_attachment_info +description: +- Gather info for GCP InterconnectAttachment +short_description: Gather info for GCP InterconnectAttachment +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - Region where the regional interconnect attachment resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an interconnect attachment + gcp_compute_interconnect_attachment_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + adminEnabled: + description: + - Whether the VLAN attachment is enabled or disabled. When using PARTNER type + this will Pre-Activate the interconnect attachment . + returned: success + type: bool + cloudRouterIpAddress: + description: + - IPv4 address + prefix length to be configured on Cloud Router Interface for + this interconnect attachment. 
+ returned: success + type: str + customerRouterIpAddress: + description: + - IPv4 address + prefix length to be configured on the customer router subinterface + for this interconnect attachment. + returned: success + type: str + interconnect: + description: + - URL of the underlying Interconnect object that this attachment's traffic will + traverse through. Required if type is DEDICATED, must not be set if type is + PARTNER. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + mtu: + description: + - Maximum Transmission Unit (MTU), in bytes, of packets passing through this + interconnect attachment. Currently, only 1440 and 1500 are allowed. If not + specified, the value will default to 1440. + returned: success + type: str + bandwidth: + description: + - Provisioned bandwidth capacity for the interconnect attachment. + - For attachments of type DEDICATED, the user can set the bandwidth. + - For attachments of type PARTNER, the Google Partner that is operating the + interconnect must set the bandwidth. + - Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, + Defaults to BPS_10G . + returned: success + type: str + edgeAvailabilityDomain: + description: + - Desired availability domain for the attachment. Only available for type PARTNER, + at creation time. For improved reliability, customers should configure a pair + of attachments with one per availability domain. The selected availability + domain will be provided to the Partner via the pairing key so that the provisioned + circuit will lie in the specified domain. If not specified, the value will + default to AVAILABILITY_DOMAIN_ANY. + returned: success + type: str + pairingKey: + description: + - '[Output only for type PARTNER. Not present for DEDICATED]. The opaque identifier + of an PARTNER attachment used to initiate provisioning with a selected partner. + Of the form "XXXXX/region/domain" .' 
+ returned: success + type: str + partnerAsn: + description: + - "[Output only for type PARTNER. Not present for DEDICATED]. Optional BGP ASN + for the router that should be supplied by a layer 3 Partner if they configured + BGP on behalf of the customer." + returned: success + type: str + privateInterconnectInfo: + description: + - Information specific to an InterconnectAttachment. This property is populated + if the interconnect that this is attached to is of type DEDICATED. + returned: success + type: complex + contains: + tag8021q: + description: + - 802.1q encapsulation tag to be used for traffic between Google and the + customer, going to and from this network and region. + returned: success + type: int + type: + description: + - The type of InterconnectAttachment you wish to create. Defaults to DEDICATED. + returned: success + type: str + state: + description: + - "[Output Only] The current state of this attachment's functionality." + returned: success + type: str + googleReferenceId: + description: + - Google reference ID, to be used when raising support tickets with Google or + otherwise to debug backend connectivity issues. + returned: success + type: str + router: + description: + - URL of the cloud router to be used for dynamic routing. This router must be + in the same region as this InterconnectAttachment. The InterconnectAttachment + will automatically connect the Interconnect to the network & region within + which the Cloud Router is configured. + returned: success + type: dict + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + candidateSubnets: + description: + - Up to 16 candidate prefixes that can be used to restrict the allocation of + cloudRouterIpAddress and customerRouterIpAddress for this attachment. + - All prefixes must be within link-local address space (169.254.0.0/16) and + must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused + /29 from the supplied candidate prefix(es). The request will fail if all possible + /29s are in use on Google's edge. If not supplied, Google will randomly select + an unused /29 from all of link-local space. + returned: success + type: list + vlanTag8021q: + description: + - The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. When using + PARTNER type this will be managed upstream. + returned: success + type: int + ipsecInternalAddresses: + description: + - URL of addresses that have been reserved for the interconnect attachment, + Used only for interconnect attachment that has the encryption option as IPSEC. + - The addresses must be RFC 1918 IP address ranges. When creating HA VPN gateway + over the interconnect attachment, if the attachment is configured to use an + RFC 1918 IP address, then the VPN gateway's IP address will be allocated from + the IP address range specified here. + - For example, if the HA VPN gateway's interface 0 is paired to this interconnect + attachment, then an RFC 1918 IP address for the VPN gateway interface 0 will + be allocated from the IP address specified for this interconnect attachment. 
+ - If this field is not specified for interconnect attachment that has encryption + option as IPSEC, later on when creating HA VPN gateway on this interconnect + attachment, the HA VPN gateway's IP address will be allocated from regional + external IP address pool. + returned: success + type: list + encryption: + description: + - 'Indicates the user-supplied encryption option of this interconnect attachment: + NONE is the default value, which means that the attachment carries unencrypted + traffic. VMs can send traffic to, or receive traffic from, this type of attachment.' + - IPSEC indicates that the attachment carries only traffic encrypted by an IPsec + device such as an HA VPN gateway. VMs cannot directly send traffic to, or + receive traffic from, such an attachment. To use IPsec-encrypted Cloud Interconnect + create the attachment using this option. + - Not currently available publicly. + returned: success + type: str + region: + description: + - Region where the regional interconnect attachment resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/interconnectAttachments".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_network.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_network.py new file mode 100644 index 000000000..acaf59dcc --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_network.py @@ -0,0 +1,461 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_network +description: +- Manages a VPC network or legacy network resource on GCP. 
+short_description: Creates a GCP Network +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. The resource must be recreated to + modify this field. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + auto_create_subnetworks: + description: + - When set to `true`, the network is created in "auto subnet mode" and it will + create a subnet for each region automatically across the `10.128.0.0/9` address + range. + - When set to `false`, the network is created in "custom subnet mode" so the user + can explicitly connect subnetwork resources. + required: false + type: bool + routing_config: + description: + - The network-level routing configuration for this network. Used by Cloud Router + to determine what type of network-wide routing behavior to enforce. + required: false + type: dict + suboptions: + routing_mode: + description: + - The network-wide routing mode to use. If set to `REGIONAL`, this network's + cloud routers will only advertise routes with subnetworks of this network + in the same region as the router. If set to `GLOBAL`, this network's cloud + routers will advertise routes with all subnetworks of this network, across + regions. 
+ - 'Some valid choices include: "REGIONAL", "GLOBAL"' + required: true + type: str + mtu: + description: + - Maximum Transmission Unit in bytes. The minimum value for this field is 1460 + and the maximum value is 1500 bytes. + required: false + type: int + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/networks)' +- 'Official Documentation: U(https://cloud.google.com/vpc/docs/vpc)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. 
+- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: test_object + auto_create_subnetworks: 'true' + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +description: + description: + - An optional description of this resource. The resource must be recreated to modify + this field. + returned: success + type: str +gateway_ipv4: + description: + - The gateway address for default routing out of the network. This value is selected + by GCP. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +subnetworks: + description: + - Server-defined fully-qualified URLs for all subnetworks in this network. + returned: success + type: list +autoCreateSubnetworks: + description: + - When set to `true`, the network is created in "auto subnet mode" and it will create + a subnet for each region automatically across the `10.128.0.0/9` address range. + - When set to `false`, the network is created in "custom subnet mode" so the user + can explicitly connect subnetwork resources. + returned: success + type: bool +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. 
+ returned: success + type: str +routingConfig: + description: + - The network-level routing configuration for this network. Used by Cloud Router + to determine what type of network-wide routing behavior to enforce. + returned: success + type: complex + contains: + routingMode: + description: + - The network-wide routing mode to use. If set to `REGIONAL`, this network's + cloud routers will only advertise routes with subnetworks of this network + in the same region as the router. If set to `GLOBAL`, this network's cloud + routers will advertise routes with all subnetworks of this network, across + regions. + returned: success + type: str +mtu: + description: + - Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and + the maximum value is 1500 bytes. + returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + auto_create_subnetworks=dict(type='bool'), + routing_config=dict(type='dict', options=dict(routing_mode=dict(required=True, type='str'))), + mtu=dict(type='int'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#network' + + fetch = 
fetch_resource(module, self_link(module), kind)
+    changed = False
+
+    # Standard generated state machine: reconcile desired state against the
+    # fetched resource, updating/creating/deleting as needed.
+    if fetch:
+        if state == 'present':
+            if is_different(module, fetch):
+                update(module, self_link(module), kind, fetch)
+                fetch = fetch_resource(module, self_link(module), kind)
+                changed = True
+        else:
+            delete(module, self_link(module), kind)
+            fetch = {}
+            changed = True
+    else:
+        if state == 'present':
+            fetch = create(module, collection(module), kind)
+            changed = True
+        else:
+            fetch = {}
+
+    fetch.update({'changed': changed})
+
+    module.exit_json(**fetch)
+
+
+def create(module, link, kind):
+    """POST the resource and block until the returned operation completes."""
+    auth = GcpSession(module, 'compute')
+    return wait_for_operation(module, auth.post(link, resource_to_request(module)))
+
+
+def update(module, link, kind, fetch):
+    """Apply field-level updates, then re-fetch the resource."""
+    update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
+    return fetch_resource(module, self_link(module), kind)
+
+
+def update_fields(module, request, response):
+    """Dispatch per-field update calls; only routingConfig is updatable here."""
+    if response.get('routingConfig') != request.get('routingConfig'):
+        routing_config_update(module, request, response)
+
+
+def routing_config_update(module, request, response):
+    """PATCH only the routingConfig sub-object of the network."""
+    auth = GcpSession(module, 'compute')
+    auth.patch(
+        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/networks/{name}"]).format(**module.params),
+        {u'routingConfig': NetworkRoutingconfig(module.params.get('routing_config', {}), module).to_request()},
+    )
+
+
+def delete(module, link, kind):
+    """DELETE the resource and block until the returned operation completes."""
+    auth = GcpSession(module, 'compute')
+    return wait_for_operation(module, auth.delete(link))
+
+
+def resource_to_request(module):
+    """Translate Ansible params into the API request body, dropping empty values."""
+    request = {
+        u'kind': 'compute#network',
+        u'description': module.params.get('description'),
+        u'name': module.params.get('name'),
+        u'autoCreateSubnetworks': module.params.get('auto_create_subnetworks'),
+        u'routingConfig': NetworkRoutingconfig(module.params.get('routing_config', {}), module).to_request(),
+        u'mtu': module.params.get('mtu'),
+    }
+    return_vals = {}
+    # Keep falsy values only when they are an explicit boolean False
+    # (e.g. auto_create_subnetworks=False must be sent to the API).
+    for k, v in request.items():
+        if v or v is False:
+            return_vals[k] = v
+
+    return return_vals
+
+
+def fetch_resource(module, link, kind, allow_not_found=True):
+    """GET the resource at `link`; returns None on 404 when allow_not_found."""
+    auth = GcpSession(module, 'compute')
+    return return_if_object(module, auth.get(link), kind, allow_not_found)
+
+
+def self_link(module):
+    """URL of this specific network resource."""
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/networks/{name}".format(**module.params)
+
+
+def collection(module):
+    """URL of the networks collection (used for creates)."""
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/networks".format(**module.params)
+
+
+def return_if_object(module, response, kind, allow_not_found=False):
+    """Decode an API response; fail the module on HTTP/JSON/API errors.
+
+    Returns None for 204 responses, and for 404 when allow_not_found is set.
+    """
+    # If not found, return nothing.
+    if allow_not_found and response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError):
+        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+def is_different(module, response):
+    """Compare desired request vs. current state over their shared keys only."""
+    request = resource_to_request(module)
+    response = response_to_hash(module, response)
+
+    # Remove all output-only from response.
+    response_vals = {}
+    for k, v in response.items():
+        if k in request:
+            response_vals[k] = v
+
+    request_vals = {}
+    for k, v in request.items():
+        if k in response:
+            request_vals[k] = v
+
+    return GcpRequest(request_vals) != GcpRequest(response_vals)
+
+
+# Remove unnecessary properties from the response.
+# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response):
+    # NOTE: user-settable fields are echoed from module.params (not the API
+    # response) by generator design, so is_different() compares like-for-like.
+    return {
+        u'description': module.params.get('description'),
+        u'gatewayIPv4': response.get(u'gatewayIPv4'),
+        u'id': response.get(u'id'),
+        u'name': module.params.get('name'),
+        u'subnetworks': response.get(u'subnetworks'),
+        u'autoCreateSubnetworks': module.params.get('auto_create_subnetworks'),
+        u'creationTimestamp': response.get(u'creationTimestamp'),
+        u'routingConfig': NetworkRoutingconfig(response.get(u'routingConfig', {}), module).from_response(),
+        u'mtu': module.params.get('mtu'),
+    }
+
+
+def async_op_url(module, extra_data=None):
+    """Build the global operations URL, merging extra_data (e.g. op_id) with params."""
+    if extra_data is None:
+        extra_data = {}
+    url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
+    combined = extra_data.copy()
+    combined.update(module.params)
+    return url.format(**combined)
+
+
+def wait_for_operation(module, response):
+    """Resolve an operation response to the final network resource."""
+    op_result = return_if_object(module, response, 'compute#operation')
+    if op_result is None:
+        return {}
+    status = navigate_hash(op_result, ['status'])
+    wait_done = wait_for_completion(status, op_result, module)
+    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#network')
+
+
+def wait_for_completion(status, op_result, module):
+    """Poll the operation once per second until its status is DONE."""
+    op_id = navigate_hash(op_result, ['name'])
+    op_uri = async_op_url(module, {'op_id': op_id})
+    while status != 'DONE':
+        raise_if_errors(op_result, ['error', 'errors'], module)
+        time.sleep(1.0)
+        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
+        status = navigate_hash(op_result, ['status'])
+    return op_result
+
+
+def raise_if_errors(response, err_path, module):
+    """Fail the module if the operation carries an error payload."""
+    errors = navigate_hash(response, err_path)
+    if errors is not None:
+        module.fail_json(msg=errors)
+
+
+class NetworkRoutingconfig(object):
+    """Converter between Ansible 'routing_config' params and the API's routingConfig."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        # Map snake_case Ansible suboptions to API field names.
+        return remove_nones_from_dict({u'routingMode': 
self.request.get('routing_mode')}) + + def from_response(self): + return remove_nones_from_dict({u'routingMode': self.request.get(u'routingMode')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_network_endpoint_group.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_network_endpoint_group.py new file mode 100644 index 000000000..9712c42d9 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_network_endpoint_group.py @@ -0,0 +1,454 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_network_endpoint_group +description: +- Network endpoint groups (NEGs) are zonal resources that represent collections of + IP address and port combinations for GCP resources within a single subnet. Each + IP address and port combination is called a network endpoint. 
+- Network endpoint groups can be used as backends in backend services for HTTP(S), + TCP proxy, and SSL proxy load balancers. You cannot use NEGs as a backend with internal + load balancers. Because NEG backends allow you to specify IP addresses and ports, + you can distribute traffic in a granular fashion among applications or containers + running within VM instances. +- Recreating a network endpoint group that's in use by another resource will give + a `resourceInUseByAnotherResource` error. Use `lifecycle.create_before_destroy` + to avoid this type of error. +short_description: Creates a GCP NetworkEndpointGroup +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + network_endpoint_type: + description: + - Type of network endpoints in this network endpoint group. + - 'Some valid choices include: "GCE_VM_IP_PORT"' + required: false + default: GCE_VM_IP_PORT + type: str + network: + description: + - The network to which all network endpoints in the NEG belong. + - Uses "default" project network if unspecified. + - 'This field represents a link to a Network resource in GCP. 
It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: true + type: dict + subnetwork: + description: + - Optional subnetwork to which all network endpoints in the NEG belong. + - 'This field represents a link to a Subnetwork resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_subnetwork task and then set this subnetwork field to "{{ name-of-resource + }}"' + required: false + type: dict + default_port: + description: + - The default port used if the port number is not specified in the network endpoint. + required: false + type: int + zone: + description: + - Zone where the network endpoint group is located. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. 
+ - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups)' +- 'Official Documentation: U(https://cloud.google.com/load-balancing/docs/negs/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: "{{ resource_name }}" + auto_create_subnetworks: 'false' + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a subnetwork + google.cloud.gcp_compute_subnetwork: + name: "{{ resource_name }}" + ip_cidr_range: 10.0.0.0/16 + region: us-central1 + network: "{{ network }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: subnetwork + +- name: create a network endpoint group + google.cloud.gcp_compute_network_endpoint_group: + name: test_object + network: "{{ network }}" + subnetwork: "{{ subnetwork }}" + default_port: 90 + zone: us-central1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +networkEndpointType: + description: + - Type of network endpoints in this network endpoint group. + returned: success + type: str +size: + description: + - Number of network endpoints in the network endpoint group. 
+ returned: success + type: int +network: + description: + - The network to which all network endpoints in the NEG belong. + - Uses "default" project network if unspecified. + returned: success + type: dict +subnetwork: + description: + - Optional subnetwork to which all network endpoints in the NEG belong. + returned: success + type: dict +defaultPort: + description: + - The default port used if the port number is not specified in the network endpoint. + returned: success + type: int +zone: + description: + - Zone where the network endpoint group is located. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + network_endpoint_type=dict(default='GCE_VM_IP_PORT', type='str'), + network=dict(required=True, type='dict'), + subnetwork=dict(type='dict'), + default_port=dict(type='int'), + zone=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#networkEndpointGroup' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, 
self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), kind) + create(module, collection(module), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#networkEndpointGroup', + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'networkEndpointType': module.params.get('network_endpoint_type'), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + u'subnetwork': replace_resource_dict(module.params.get(u'subnetwork', {}), 'selfLink'), + u'defaultPort': module.params.get('default_port'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. 
+ if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'id': response.get(u'id'), + u'name': response.get(u'name'), + u'description': response.get(u'description'), + u'networkEndpointType': response.get(u'networkEndpointType'), + u'size': response.get(u'size'), + u'network': response.get(u'network'), + u'subnetwork': response.get(u'subnetwork'), + u'defaultPort': response.get(u'defaultPort'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#networkEndpointGroup') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_network_endpoint_group_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_network_endpoint_group_info.py new file mode 100644 index 000000000..8f9d1a8e8 --- /dev/null +++ 
b/ansible_collections/google/cloud/plugins/modules/gcp_compute_network_endpoint_group_info.py @@ -0,0 +1,246 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_network_endpoint_group_info +description: +- Gather info for GCP NetworkEndpointGroup +short_description: Gather info for GCP NetworkEndpointGroup +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + zone: + description: + - Zone where the network endpoint group is located. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use.
+ type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a network endpoint group + gcp_compute_network_endpoint_group_info: + zone: us-central1-a + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource; provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + networkEndpointType: + description: + - Type of network endpoints in this network endpoint group. + returned: success + type: str + size: + description: + - Number of network endpoints in the network endpoint group. + returned: success + type: int + network: + description: + - The network to which all network endpoints in the NEG belong. + - Uses "default" project network if unspecified. + returned: success + type: dict + subnetwork: + description: + - Optional subnetwork to which all network endpoints in the NEG belong. + returned: success + type: dict + defaultPort: + description: + - The default port used if the port number is not specified in the network endpoint. + returned: success + type: int + zone: + description: + - Zone where the network endpoint group is located. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_network_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_network_info.py new file mode 100644 index 000000000..f2b7c498f --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_network_info.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_network_info +description: +- Gather info for GCP Network +short_description: Gather info for GCP Network +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email.
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a network + gcp_compute_network_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + description: + description: + - An optional description of this resource. The resource must be recreated to + modify this field. + returned: success + type: str + gateway_ipv4: + description: + - The gateway address for default routing out of the network. This value is + selected by GCP. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + subnetworks: + description: + - Server-defined fully-qualified URLs for all subnetworks in this network. + returned: success + type: list + autoCreateSubnetworks: + description: + - When set to `true`, the network is created in "auto subnet mode" and it will + create a subnet for each region automatically across the `10.128.0.0/9` address + range. + - When set to `false`, the network is created in "custom subnet mode" so the + user can explicitly connect subnetwork resources. + returned: success + type: bool + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + routingConfig: + description: + - The network-level routing configuration for this network. Used by Cloud Router + to determine what type of network-wide routing behavior to enforce. + returned: success + type: complex + contains: + routingMode: + description: + - The network-wide routing mode to use. If set to `REGIONAL`, this network's + cloud routers will only advertise routes with subnetworks of this network + in the same region as the router. If set to `GLOBAL`, this network's cloud + routers will advertise routes with all subnetworks of this network, across + regions. + returned: success + type: str + mtu: + description: + - Maximum Transmission Unit in bytes. The minimum value for this field is 1460 + and the maximum value is 1500 bytes. 
+ returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/networks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_group.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_group.py new file mode 100644 index 000000000..e8bf72577 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_group.py @@ -0,0 +1,567 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_node_group +description: +- Represents a NodeGroup resource to manage a group of sole-tenant nodes. +short_description: Creates a GCP NodeGroup +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional textual description of the resource. + required: false + type: str + name: + description: + - Name of the resource. + required: false + type: str + node_template: + description: + - The URL of the node template to which this node group belongs. + - 'This field represents a link to a NodeTemplate resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_node_template task and then set this node_template field to + "{{ name-of-resource }}"' + required: true + type: dict + size: + description: + - The total number of nodes in the node group. + required: true + type: int + maintenance_policy: + description: + - 'Specifies how to handle instances when a node in the group undergoes maintenance. 
+ Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The + default value is DEFAULT.' + required: false + default: DEFAULT + type: str + maintenance_window: + description: + - contains properties for the timeframe of maintenance . + required: false + type: dict + suboptions: + start_time: + description: + - instances.start time of the window. This must be in UTC format that resolves + to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both + 13:00-5 and 08:00 are valid. + required: true + type: str + autoscaling_policy: + description: + - If you use sole-tenant nodes for your workloads, you can use the node group + autoscaler to automatically manage the sizes of your node groups. + required: false + type: dict + suboptions: + mode: + description: + - 'The autoscaling mode. Set to one of the following: - OFF: Disables the + autoscaler.' + - "- ON: Enables scaling in and scaling out." + - "- ONLY_SCALE_OUT: Enables only scaling out." + - You must use this mode if your node groups are configured to restart their + hosted VMs on minimal servers. + - 'Some valid choices include: "OFF", "ON", "ONLY_SCALE_OUT"' + required: true + type: str + min_nodes: + description: + - Minimum size of the node group. Must be less than or equal to max-nodes. + The default value is 0. + required: false + type: int + max_nodes: + description: + - Maximum size of the node group. Set to a value less than or equal to 100 + and greater than or equal to min-nodes. + required: true + type: int + zone: + description: + - Zone where this node group is located . + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)' +- 'Sole-Tenant Nodes: U(https://cloud.google.com/compute/docs/nodes/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a node template + google.cloud.gcp_compute_node_template: + name: "{{ resource_name }}" + region: us-central1 + node_type: n1-node-96-624 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: node_template + +- name: create a node group + google.cloud.gcp_compute_node_group: + name: test_object + zone: us-central1-a + description: example group for ansible + size: 1 + node_template: "{{ node_template }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional textual description of the resource. + returned: success + type: str +name: + description: + - Name of the resource. + returned: success + type: str +nodeTemplate: + description: + - The URL of the node template to which this node group belongs. + returned: success + type: dict +size: + description: + - The total number of nodes in the node group. + returned: success + type: int +maintenancePolicy: + description: + - 'Specifies how to handle instances when a node in the group undergoes maintenance. + Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default + value is DEFAULT.' + returned: success + type: str +maintenanceWindow: + description: + - contains properties for the timeframe of maintenance . + returned: success + type: complex + contains: + startTime: + description: + - instances.start time of the window. This must be in UTC format that resolves + to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 + and 08:00 are valid. 
+ returned: success + type: str +autoscalingPolicy: + description: + - If you use sole-tenant nodes for your workloads, you can use the node group autoscaler + to automatically manage the sizes of your node groups. + returned: success + type: complex + contains: + mode: + description: + - 'The autoscaling mode. Set to one of the following: - OFF: Disables the autoscaler.' + - "- ON: Enables scaling in and scaling out." + - "- ONLY_SCALE_OUT: Enables only scaling out." + - You must use this mode if your node groups are configured to restart their + hosted VMs on minimal servers. + returned: success + type: str + minNodes: + description: + - Minimum size of the node group. Must be less than or equal to max-nodes. The + default value is 0. + returned: success + type: int + maxNodes: + description: + - Maximum size of the node group. Set to a value less than or equal to 100 and + greater than or equal to min-nodes. + returned: success + type: int +zone: + description: + - Zone where this node group is located . 
+    returned: success
+    type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
+    navigate_hash,
+    GcpSession,
+    GcpModule,
+    GcpRequest,
+    remove_nones_from_dict,
+    replace_resource_dict,
+)
+import json
+import re
+import time
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    """Main function"""
+
+    module = GcpModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            description=dict(type='str'),
+            name=dict(type='str'),
+            node_template=dict(required=True, type='dict'),
+            size=dict(required=True, type='int'),
+            maintenance_policy=dict(default='DEFAULT', type='str'),
+            maintenance_window=dict(type='dict', options=dict(start_time=dict(required=True, type='str'))),
+            autoscaling_policy=dict(
+                type='dict', options=dict(mode=dict(required=True, type='str'), min_nodes=dict(type='int'), max_nodes=dict(required=True, type='int'))
+            ),
+            zone=dict(required=True, type='str'),
+        )
+    )
+
+    # Fall back to the Compute scope when the playbook did not request any.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
+
+    state = module.params['state']
+    kind = 'compute#NodeGroup'
+
+    # Probe for an existing resource, then converge it toward the desired state.
+    fetch = fetch_resource(module, self_link(module), kind)
+    changed = False
+
+    if fetch:
+        if state == 'present':
+            if is_different(module, fetch):
+                update(module, self_link(module), kind, fetch)
+                fetch = fetch_resource(module, self_link(module), kind)
+                changed = True
+        else:
+            delete(module, self_link(module), kind)
+            fetch = {}
+            changed = True
+    else:
+        if state == 'present':
+            fetch = create(module, create_link(module), kind)
+            changed = True
+        else:
+            fetch = {}
+
+    fetch.update({'changed': changed})
+
+    module.exit_json(**fetch)
+
+
+def create(module, link, kind):
+    """POST the node group and block until the async operation completes."""
+    auth = GcpSession(module, 'compute')
+    return wait_for_operation(module, auth.post(link, resource_to_request(module)))
+
+
+def update(module, link, kind, fetch):
+    """Apply the supported in-place field updates, then re-read the resource."""
+    update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
+    return fetch_resource(module, self_link(module), kind)
+
+
+def update_fields(module, request, response):
+    # Only nodeTemplate is updated in place here; drift in other fields is not
+    # acted upon by this function.
+    if response.get('nodeTemplate') != request.get('nodeTemplate'):
+        node_template_update(module, request, response)
+
+
+def node_template_update(module, request, response):
+    """Call the nodeGroups.setNodeTemplate custom method to swap the group's template."""
+    auth = GcpSession(module, 'compute')
+    auth.post(
+        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/nodeGroups/{name}/setNodeTemplate"]).format(**module.params),
+        {u'nodeTemplate': replace_resource_dict(module.params.get(u'node_template', {}), 'selfLink')},
+    )
+
+
+def delete(module, link, kind):
+    """DELETE the node group and block until the async operation completes."""
+    auth = GcpSession(module, 'compute')
+    return wait_for_operation(module, auth.delete(link))
+
+
+def resource_to_request(module):
+    """Build the camelCase API request body from the module's snake_case params."""
+    request = {
+        u'kind': 'compute#NodeGroup',
+        u'description': module.params.get('description'),
+        u'name': module.params.get('name'),
+        u'nodeTemplate': replace_resource_dict(module.params.get(u'node_template', {}), 'selfLink'),
+        u'size': module.params.get('size'),
+        u'maintenancePolicy': module.params.get('maintenance_policy'),
+        u'maintenanceWindow': NodeGroupMaintenancewindow(module.params.get('maintenance_window', {}), module).to_request(),
+        u'autoscalingPolicy': NodeGroupAutoscalingpolicy(module.params.get('autoscaling_policy', {}), module).to_request(),
+    }
+    # Drop unset/empty values but keep an explicit False.
+    return_vals = {}
+    for k, v in request.items():
+        if v or v is False:
+            return_vals[k] = v
+
+    return return_vals
+
+
+def fetch_resource(module, link, kind, allow_not_found=True):
+    """GET `link`; returns None on a 404 when allow_not_found is True."""
+    auth = GcpSession(module, 'compute')
+    return return_if_object(module, auth.get(link), kind, allow_not_found)
+
+
+def self_link(module):
+    # URL of this specific node group resource.
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{name}".format(**module.params)
+
+
+def collection(module):
+    # URL of the zonal nodeGroups collection.
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/nodeGroups".format(**module.params)
+
+
+def create_link(module):
+    # initialNodeCount is a create-time query parameter, not a body field.
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/nodeGroups?initialNodeCount={size}".format(**module.params)
+
+
+def return_if_object(module, response, kind, allow_not_found=False):
+    """Decode a JSON response, mapping 404/204 to None and API errors to module failure."""
+    # If not found, return nothing.
+    if allow_not_found and response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError):
+        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+def is_different(module, response):
+    """Compare desired request vs. current state over their shared keys only."""
+    request = resource_to_request(module)
+    response = response_to_hash(module, response)
+
+    # Remove all output-only from response.
+    response_vals = {}
+    for k, v in response.items():
+        if k in request:
+            response_vals[k] = v
+
+    request_vals = {}
+    for k, v in request.items():
+        if k in response:
+            request_vals[k] = v
+
+    return GcpRequest(request_vals) != GcpRequest(response_vals)
+
+
+# Remove unnecessary properties from the response.
+# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response):
+    """Map the API response onto the subset of fields used for comparison."""
+    return {
+        u'creationTimestamp': response.get(u'creationTimestamp'),
+        u'description': response.get(u'description'),
+        u'name': response.get(u'name'),
+        u'nodeTemplate': response.get(u'nodeTemplate'),
+        # NOTE(review): size is echoed from the module params rather than the API
+        # response, so drift in the group's size is not detected — confirm intended.
+        u'size': module.params.get('size'),
+        u'maintenancePolicy': response.get(u'maintenancePolicy'),
+        u'maintenanceWindow': NodeGroupMaintenancewindow(response.get(u'maintenanceWindow', {}), module).from_response(),
+        u'autoscalingPolicy': NodeGroupAutoscalingpolicy(response.get(u'autoscalingPolicy', {}), module).from_response(),
+    }
+
+
+def region_selflink(name, params):
+    """Expand a bare region name into a full selfLink URL; pass through if already one."""
+    if name is None:
+        return
+    url = r"https://compute.googleapis.com/compute/v1/projects/.*/regions/.*"
+    if not re.match(url, name):
+        name = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name
+    return name
+
+
+def zone_selflink(name, params):
+    """Expand a bare zone name into a full selfLink URL; pass through if already one."""
+    if name is None:
+        return
+    url = r"https://compute.googleapis.com/compute/v1/projects/.*/zones/.*"
+    if not re.match(url, name):
+        name = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/%s".format(**params) % name
+    return name
+
+
+def async_op_url(module, extra_data=None):
+    """Build the zonal operations URL used to poll async operation status."""
+    if extra_data is None:
+        extra_data = {}
+    url = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}"
+    combined = extra_data.copy()
+    combined.update(module.params)
+    return url.format(**combined)
+
+
+def wait_for_operation(module, response):
+    """Wait for the operation in `response` to finish, then fetch its target resource."""
+    op_result = return_if_object(module, response, 'compute#operation')
+    if op_result is None:
+        return {}
+    status = navigate_hash(op_result, ['status'])
+    wait_done = wait_for_completion(status, op_result, module)
+    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#NodeGroup')
+
+
+def wait_for_completion(status, op_result, module):
+    """Poll the operation once per second until DONE, failing fast on API errors."""
+    op_id = navigate_hash(op_result, ['name'])
+    op_uri = async_op_url(module, {'op_id': op_id})
+    while status != 'DONE':
+        raise_if_errors(op_result, ['error', 'errors'], module)
+        time.sleep(1.0)
+        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
+        status = navigate_hash(op_result, ['status'])
+    return op_result
+
+
+def raise_if_errors(response, err_path, module):
+    # Fail the module if the operation body carries an error list at err_path.
+    errors = navigate_hash(response, err_path)
+    if errors is not None:
+        module.fail_json(msg=errors)
+
+
+class NodeGroupMaintenancewindow(object):
+    """Converts maintenance_window between snake_case params and camelCase API JSON."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict({u'startTime': self.request.get('start_time')})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'startTime': self.request.get(u'startTime')})
+
+
+class NodeGroupAutoscalingpolicy(object):
+    """Converts autoscaling_policy between snake_case params and camelCase API JSON."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict(
+            {u'mode': self.request.get('mode'), u'minNodes': self.request.get('min_nodes'), u'maxNodes': self.request.get('max_nodes')}
+        )
+
+    def from_response(self):
+        return remove_nones_from_dict(
+            {u'mode': self.request.get(u'mode'), u'minNodes': self.request.get(u'minNodes'), u'maxNodes': self.request.get(u'maxNodes')}
+        )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_group_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_group_info.py
new file mode 100644
index 000000000..68f340994
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_group_info.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------

+from __future__ import absolute_import, division, print_function

+__metaclass__ = type

+################################################################################
+# Documentation
+################################################################################

+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}

+DOCUMENTATION = '''
+---
+module: gcp_compute_node_group_info
+description:
+- Gather info for GCP NodeGroup
+short_description: Gather info for GCP NodeGroup
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2) .
+    type: list
+    elements: str
+  zone:
+    description:
+    - Zone where this node group is located .
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a node group + gcp_compute_node_group_info: + zone: us-central1-a + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional textual description of the resource. + returned: success + type: str + name: + description: + - Name of the resource. 
+ returned: success + type: str + nodeTemplate: + description: + - The URL of the node template to which this node group belongs. + returned: success + type: dict + size: + description: + - The total number of nodes in the node group. + returned: success + type: int + maintenancePolicy: + description: + - 'Specifies how to handle instances when a node in the group undergoes maintenance. + Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The + default value is DEFAULT.' + returned: success + type: str + maintenanceWindow: + description: + - contains properties for the timeframe of maintenance . + returned: success + type: complex + contains: + startTime: + description: + - instances.start time of the window. This must be in UTC format that resolves + to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both + 13:00-5 and 08:00 are valid. + returned: success + type: str + autoscalingPolicy: + description: + - If you use sole-tenant nodes for your workloads, you can use the node group + autoscaler to automatically manage the sizes of your node groups. + returned: success + type: complex + contains: + mode: + description: + - 'The autoscaling mode. Set to one of the following: - OFF: Disables the + autoscaler.' + - "- ON: Enables scaling in and scaling out." + - "- ONLY_SCALE_OUT: Enables only scaling out." + - You must use this mode if your node groups are configured to restart their + hosted VMs on minimal servers. + returned: success + type: str + minNodes: + description: + - Minimum size of the node group. Must be less than or equal to max-nodes. + The default value is 0. + returned: success + type: int + maxNodes: + description: + - Maximum size of the node group. Set to a value less than or equal to 100 + and greater than or equal to min-nodes. + returned: success + type: int + zone: + description: + - Zone where this node group is located . 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/nodeGroups".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_template.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_template.py new file mode 100644 index 000000000..5db26eaea --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_template.py @@ -0,0 +1,519 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_node_template +description: +- Represents a NodeTemplate resource. 
Node templates specify properties for creating + sole-tenant nodes, such as node type, vCPU and memory requirements, node affinity + labels, and region. +short_description: Creates a GCP NodeTemplate +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional textual description of the resource. + required: false + type: str + name: + description: + - Name of the resource. + required: false + type: str + node_affinity_labels: + description: + - Labels to use for node affinity, which will be used in instance scheduling. + required: false + type: dict + node_type: + description: + - Node type to use for nodes group that are created from this template. + - Only one of nodeTypeFlexibility and nodeType can be specified. + required: false + type: str + node_type_flexibility: + description: + - Flexible properties for the desired node type. Node groups that use this node + template will create nodes of a type that matches these properties. Only one + of nodeTypeFlexibility and nodeType can be specified. + required: false + type: dict + suboptions: + cpus: + description: + - Number of virtual CPUs to use. + required: false + type: str + memory: + description: + - Physical memory available to the node, defined in MB. + required: false + type: str + server_binding: + description: + - The server binding policy for nodes using this template. Determines where the + nodes should restart following a maintenance event. + required: false + type: dict + suboptions: + type: + description: + - Type of server binding policy. If `RESTART_NODE_ON_ANY_SERVER`, nodes using + this template will restart on any physical server following a maintenance + event. 
+ - If `RESTART_NODE_ON_MINIMAL_SERVER`, nodes using this template will restart + on the same physical server following a maintenance event, instead of being + live migrated to or restarted on a new physical server. This option may + be useful if you are using software licenses tied to the underlying server + characteristics such as physical sockets or cores, to avoid the need for + additional licenses when maintenance occurs. However, VMs on such nodes + will experience outages while maintenance is applied. + - 'Some valid choices include: "RESTART_NODE_ON_ANY_SERVER", "RESTART_NODE_ON_MINIMAL_SERVERS"' + required: true + type: str + cpu_overcommit_type: + description: + - CPU overcommit. + - 'Some valid choices include: "ENABLED", "NONE"' + required: false + default: NONE + type: str + region: + description: + - Region where nodes using the node template will be created . + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/nodeTemplates)' +- 'Sole-Tenant Nodes: U(https://cloud.google.com/compute/docs/nodes/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a node template + google.cloud.gcp_compute_node_template: + name: test_object + region: us-central1 + node_type: n1-node-96-624 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional textual description of the resource. + returned: success + type: str +name: + description: + - Name of the resource. + returned: success + type: str +nodeAffinityLabels: + description: + - Labels to use for node affinity, which will be used in instance scheduling. + returned: success + type: dict +nodeType: + description: + - Node type to use for nodes group that are created from this template. + - Only one of nodeTypeFlexibility and nodeType can be specified. + returned: success + type: str +nodeTypeFlexibility: + description: + - Flexible properties for the desired node type. 
Node groups that use this node + template will create nodes of a type that matches these properties. Only one of + nodeTypeFlexibility and nodeType can be specified. + returned: success + type: complex + contains: + cpus: + description: + - Number of virtual CPUs to use. + returned: success + type: str + memory: + description: + - Physical memory available to the node, defined in MB. + returned: success + type: str + localSsd: + description: + - Use local SSD . + returned: success + type: str +serverBinding: + description: + - The server binding policy for nodes using this template. Determines where the + nodes should restart following a maintenance event. + returned: success + type: complex + contains: + type: + description: + - Type of server binding policy. If `RESTART_NODE_ON_ANY_SERVER`, nodes using + this template will restart on any physical server following a maintenance + event. + - If `RESTART_NODE_ON_MINIMAL_SERVER`, nodes using this template will restart + on the same physical server following a maintenance event, instead of being + live migrated to or restarted on a new physical server. This option may be + useful if you are using software licenses tied to the underlying server characteristics + such as physical sockets or cores, to avoid the need for additional licenses + when maintenance occurs. However, VMs on such nodes will experience outages + while maintenance is applied. + returned: success + type: str +cpuOvercommitType: + description: + - CPU overcommit. + returned: success + type: str +region: + description: + - Region where nodes using the node template will be created . 
+    returned: success
+    type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
+    navigate_hash,
+    GcpSession,
+    GcpModule,
+    GcpRequest,
+    remove_nones_from_dict,
+    replace_resource_dict,
+)
+import json
+import re
+import time
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    """Main function"""
+
+    module = GcpModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            description=dict(type='str'),
+            name=dict(type='str'),
+            node_affinity_labels=dict(type='dict'),
+            node_type=dict(type='str'),
+            node_type_flexibility=dict(type='dict', options=dict(cpus=dict(type='str'), memory=dict(type='str'))),
+            server_binding=dict(type='dict', options=dict(type=dict(required=True, type='str'))),
+            cpu_overcommit_type=dict(default='NONE', type='str'),
+            region=dict(required=True, type='str'),
+        ),
+        mutually_exclusive=[['node_type', 'node_type_flexibility']],
+    )
+
+    # Fall back to the Compute scope when the playbook did not request any.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
+
+    state = module.params['state']
+    kind = 'compute#nodeTemplate'
+
+    # Probe for an existing resource, then converge it toward the desired state.
+    fetch = fetch_resource(module, self_link(module), kind)
+    changed = False
+
+    if fetch:
+        if state == 'present':
+            if is_different(module, fetch):
+                update(module, self_link(module), kind)
+                fetch = fetch_resource(module, self_link(module), kind)
+                changed = True
+        else:
+            delete(module, self_link(module), kind)
+            fetch = {}
+            changed = True
+    else:
+        if state == 'present':
+            fetch = create(module, collection(module), kind)
+            changed = True
+        else:
+            fetch = {}
+
+    fetch.update({'changed': changed})
+
+    module.exit_json(**fetch)
+
+
+def create(module, link, kind):
+    """POST the node template and block until the async operation completes."""
+    auth = GcpSession(module, 'compute')
+    return wait_for_operation(module, auth.post(link, resource_to_request(module)))
+
+
+def update(module, link, kind):
+    # No in-place update is attempted: changes are applied by deleting the
+    # existing template and recreating it with the new request body.
+    delete(module, self_link(module), kind)
+    create(module, collection(module), kind)
+
+
+def delete(module, link, kind):
+    """DELETE the node template and block until the async operation completes."""
+    auth = GcpSession(module, 'compute')
+    return wait_for_operation(module, auth.delete(link))
+
+
+def resource_to_request(module):
+    """Build the camelCase API request body from the module's snake_case params."""
+    request = {
+        u'kind': 'compute#nodeTemplate',
+        u'description': module.params.get('description'),
+        u'name': module.params.get('name'),
+        u'nodeAffinityLabels': module.params.get('node_affinity_labels'),
+        u'nodeType': module.params.get('node_type'),
+        u'nodeTypeFlexibility': NodeTemplateNodetypeflexibility(module.params.get('node_type_flexibility', {}), module).to_request(),
+        u'serverBinding': NodeTemplateServerbinding(module.params.get('server_binding', {}), module).to_request(),
+        u'cpuOvercommitType': module.params.get('cpu_overcommit_type'),
+    }
+    # Drop unset/empty values but keep an explicit False.
+    return_vals = {}
+    for k, v in request.items():
+        if v or v is False:
+            return_vals[k] = v
+
+    return return_vals
+
+
+def fetch_resource(module, link, kind, allow_not_found=True):
+    """GET `link`; returns None on a 404 when allow_not_found is True."""
+    auth = GcpSession(module, 'compute')
+    return return_if_object(module, auth.get(link), kind, allow_not_found)
+
+
+def self_link(module):
+    # URL of this specific node template resource (regional).
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{name}".format(**module.params)
+
+
+def collection(module):
+    # URL of the regional nodeTemplates collection.
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/nodeTemplates".format(**module.params)
+
+
+def return_if_object(module, response, kind, allow_not_found=False):
+    """Decode a JSON response, mapping 404/204 to None and API errors to module failure."""
+    # If not found, return nothing.
+    if allow_not_found and response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError):
+        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+def is_different(module, response):
+    """Compare desired request vs. current state over their shared keys only."""
+    request = resource_to_request(module)
+    response = response_to_hash(module, response)
+
+    # Remove all output-only from response.
+    response_vals = {}
+    for k, v in response.items():
+        if k in request:
+            response_vals[k] = v
+
+    request_vals = {}
+    for k, v in request.items():
+        if k in response:
+            request_vals[k] = v
+
+    return GcpRequest(request_vals) != GcpRequest(response_vals)
+
+
+# Remove unnecessary properties from the response.
+# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response):
+    """Map the API response onto the subset of fields used for comparison."""
+    return {
+        u'creationTimestamp': response.get(u'creationTimestamp'),
+        u'description': response.get(u'description'),
+        u'name': response.get(u'name'),
+        u'nodeAffinityLabels': response.get(u'nodeAffinityLabels'),
+        u'nodeType': response.get(u'nodeType'),
+        u'nodeTypeFlexibility': NodeTemplateNodetypeflexibility(response.get(u'nodeTypeFlexibility', {}), module).from_response(),
+        u'serverBinding': NodeTemplateServerbinding(response.get(u'serverBinding', {}), module).from_response(),
+        u'cpuOvercommitType': response.get(u'cpuOvercommitType'),
+    }
+
+
+def region_selflink(name, params):
+    """Expand a bare region name into a full selfLink URL; pass through if already one."""
+    if name is None:
+        return
+    url = r"https://compute.googleapis.com/compute/v1/projects/.*/regions/.*"
+    if not re.match(url, name):
+        name = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name
+    return name
+
+
+def async_op_url(module, extra_data=None):
+    """Build the regional operations URL used to poll async operation status."""
+    if extra_data is None:
+        extra_data = {}
+    url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
+    combined = extra_data.copy()
+    combined.update(module.params)
+    return url.format(**combined)
+
+
+def wait_for_operation(module, response):
+    """Wait for the operation in `response` to finish, then fetch its target resource."""
+    op_result = return_if_object(module, response, 'compute#operation')
+    if op_result is None:
+        return {}
+    status = navigate_hash(op_result, ['status'])
+    wait_done = wait_for_completion(status, op_result, module)
+    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#nodeTemplate')
+
+
+def wait_for_completion(status, op_result, module):
+    """Poll the operation once per second until DONE, failing fast on API errors."""
+    op_id = navigate_hash(op_result, ['name'])
+    op_uri = async_op_url(module, {'op_id': op_id})
+    while status != 'DONE':
+        raise_if_errors(op_result, ['error', 'errors'], module)
+        time.sleep(1.0)
+        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
+        status = navigate_hash(op_result, ['status'])
+    return op_result
+
+
+def raise_if_errors(response, err_path, module):
+    # Fail the module if the operation body carries an error list at err_path.
+    errors = navigate_hash(response, err_path)
+    if errors is not None:
+        module.fail_json(msg=errors)
+
+
+class NodeTemplateNodetypeflexibility(object):
+    """Converts node_type_flexibility between snake_case params and camelCase API JSON."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict({u'cpus': self.request.get('cpus'), u'memory': self.request.get('memory')})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'cpus': self.request.get(u'cpus'), u'memory': self.request.get(u'memory')})
+
+
+class NodeTemplateServerbinding(object):
+    """Converts server_binding between snake_case params and camelCase API JSON."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict({u'type': self.request.get('type')})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'type': self.request.get(u'type')})
+
+
+if __name__ == '__main__':
+    main()
diff --git 
a/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_template_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_template_info.py
new file mode 100644
index 000000000..6859ca83e
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_node_template_info.py
@@ -0,0 +1,274 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** Type: MMv1 ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------

from __future__ import absolute_import, division, print_function

__metaclass__ = type

################################################################################
# Documentation
################################################################################

ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}

DOCUMENTATION = '''
---
module: gcp_compute_node_template_info
description:
- Gather info for GCP NodeTemplate
short_description: Gather info for GCP NodeTemplate
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
  filters:
    description:
    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
    - Each additional filter in the list will be added as an AND condition (filter1
      and filter2).
+ type: list + elements: str + region: + description: + - Region where nodes using the node template will be created . + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a node template + gcp_compute_node_template_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional textual description of the resource. + returned: success + type: str + name: + description: + - Name of the resource. + returned: success + type: str + nodeAffinityLabels: + description: + - Labels to use for node affinity, which will be used in instance scheduling. + returned: success + type: dict + nodeType: + description: + - Node type to use for nodes group that are created from this template. + - Only one of nodeTypeFlexibility and nodeType can be specified. + returned: success + type: str + nodeTypeFlexibility: + description: + - Flexible properties for the desired node type. Node groups that use this node + template will create nodes of a type that matches these properties. Only one + of nodeTypeFlexibility and nodeType can be specified. + returned: success + type: complex + contains: + cpus: + description: + - Number of virtual CPUs to use. + returned: success + type: str + memory: + description: + - Physical memory available to the node, defined in MB. + returned: success + type: str + localSsd: + description: + - Use local SSD . + returned: success + type: str + serverBinding: + description: + - The server binding policy for nodes using this template. Determines where + the nodes should restart following a maintenance event. + returned: success + type: complex + contains: + type: + description: + - Type of server binding policy. 
If `RESTART_NODE_ON_ANY_SERVER`, nodes
          using this template will restart on any physical server following a maintenance
          event.
        - If `RESTART_NODE_ON_MINIMAL_SERVER`, nodes using this template will restart
          on the same physical server following a maintenance event, instead of
          being live migrated to or restarted on a new physical server. This option
          may be useful if you are using software licenses tied to the underlying
          server characteristics such as physical sockets or cores, to avoid the
          need for additional licenses when maintenance occurs. However, VMs on
          such nodes will experience outages while maintenance is applied.
        returned: success
        type: str
    cpuOvercommitType:
      description:
      - CPU overcommit.
      returned: success
      type: str
    region:
      description:
      - Region where nodes using the node template will be created .
      returned: success
      type: str
'''

################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json

################################################################################
# Main
################################################################################


def main():
    """Entry point: list NodeTemplates in a region, optionally filtered, and exit."""
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str')))

    # Default to the compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
    module.exit_json(**return_value)


def collection(module):
    """URL of the regional nodeTemplates collection (project/region from params)."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/nodeTemplates".format(**module.params)


def fetch_list(module, link, query):
    """Fetch the (paged) collection, applying the combined filter expression."""
    auth = GcpSession(module, 'compute')
    return auth.list(link, return_if_object, array_name='items', params={'filter': query})


def query_options(filters):
    """Combine the user-supplied filter list into one filter expression string.

    Multiple clauses are joined with spaces (AND semantics per the module docs);
    each clause is parenthesized unless it already starts with '(' and ends with ')'.
    """
    if not filters:
        return ''

    if len(filters) == 1:
        return filters[0]
    else:
        queries = []
        for f in filters:
            # For multiple queries, all queries should have ()
            if f[0] != '(' and f[-1] != ')':
                queries.append("(%s)" % ''.join(f))
            else:
                queries.append(f)

        return ' '.join(queries)


def return_if_object(module, response):
    """Decode a JSON API response; fail the module on HTTP or API-level errors."""
    # If not found, return nothing.
    if response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    # API-level errors arrive embedded in an otherwise-200 body.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_autoscaler.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_autoscaler.py
new file mode 100644
index 000000000..06cadd33f
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_autoscaler.py
@@ -0,0 +1,903 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** Type: MMv1 ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_autoscaler +description: +- Represents an Autoscaler resource. +- Autoscalers allow you to automatically scale virtual machine instances in managed + instance groups according to an autoscaling policy that you define. +short_description: Creates a GCP RegionAutoscaler +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Name of the resource. The name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must + be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + required: true + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + autoscaling_policy: + description: + - 'The configuration parameters for the autoscaling algorithm. You can define + one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, + and loadBalancingUtilization.' + - If none of these are specified, the default will be to autoscale based on cpuUtilization + to 0.6 or 60%. 
+ required: true + type: dict + suboptions: + min_num_replicas: + description: + - The minimum number of replicas that the autoscaler can scale down to. This + cannot be less than 0. If not provided, autoscaler will choose a default + value depending on maximum number of instances allowed. + required: false + type: int + max_num_replicas: + description: + - The maximum number of instances that the autoscaler can scale up to. This + is required when creating or updating an autoscaler. The maximum number + of replicas should not be lower than minimal number of replicas. + required: true + type: int + cool_down_period_sec: + description: + - The number of seconds that the autoscaler should wait before it starts collecting + information from a new instance. This prevents the autoscaler from collecting + information when the instance is initializing, during which the collected + usage would not be reliable. The default time autoscaler waits is 60 seconds. + - Virtual machine initialization times might vary because of numerous factors. + We recommend that you test how long an instance may take to initialize. + To do this, create an instance and time the startup process. + required: false + default: '60' + type: int + mode: + description: + - Defines operating mode for this policy. + - 'Some valid choices include: "OFF", "ONLY_UP", "ON"' + required: false + default: 'ON' + type: str + scale_in_control: + description: + - Defines scale in controls to reduce the risk of response latency and outages + due to abrupt scale-in events . + required: false + type: dict + suboptions: + max_scaled_in_replicas: + description: + - A nested object resource. + required: false + type: dict + suboptions: + fixed: + description: + - Specifies a fixed number of VM instances. This must be a positive + integer. + required: false + type: int + percent: + description: + - Specifies a percentage of instances between 0 to 100%, inclusive. + - For example, specify 80 for 80%. 
+ required: false + type: int + time_window_sec: + description: + - How long back autoscaling should look when computing recommendations + to include directives regarding slower scale down, as described above. + required: false + type: int + cpu_utilization: + description: + - Defines the CPU utilization policy that allows the autoscaler to scale based + on the average CPU utilization of a managed instance group. + required: false + type: dict + suboptions: + utilization_target: + description: + - The target CPU utilization that the autoscaler should maintain. + - Must be a float value in the range (0, 1]. If not specified, the default + is 0.6. + - If the CPU level is below the target utilization, the autoscaler scales + down the number of instances until it reaches the minimum number of + instances you specified or until the average CPU of your instances reaches + the target utilization. + - If the average CPU is above the target utilization, the autoscaler scales + up until it reaches the maximum number of instances you specified or + until the average utilization reaches the target utilization. + required: false + type: str + predictive_method: + description: + - 'Indicates whether predictive autoscaling based on CPU metric is enabled. + Valid values are: - NONE (default). No predictive method is used. The + autoscaler scales the group to meet current demand based on real-time + metrics.' + - "- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability + by monitoring daily and weekly load patterns and scaling out ahead of + anticipated demand." + required: false + default: NONE + type: str + custom_metric_utilizations: + description: + - Configuration parameters of autoscaling based on a custom metric. + elements: dict + required: false + type: list + suboptions: + metric: + description: + - The identifier (type) of the Stackdriver Monitoring metric. + - The metric cannot have negative values. + - The metric must have a value type of INT64 or DOUBLE. 
+ required: true + type: str + utilization_target: + description: + - The target value of the metric that autoscaler should maintain. This + must be a positive value. A utilization metric scales number of virtual + machines handling requests to increase or decrease proportionally to + the metric. + - For example, a good metric to use as a utilizationTarget is U(www.googleapis.com/compute/instance/network/received_bytes_count). + - The autoscaler will work to keep this value constant for each of the + instances. + required: false + type: str + utilization_target_type: + description: + - Defines how target utilization value is expressed for a Stackdriver + Monitoring metric. + - 'Some valid choices include: "GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE"' + required: false + type: str + load_balancing_utilization: + description: + - Configuration parameters of autoscaling based on a load balancer. + required: false + type: dict + suboptions: + utilization_target: + description: + - Fraction of backend capacity utilization (set in HTTP(s) load balancing + configuration) that autoscaler should maintain. Must be a positive float + value. If not defined, the default is 0.8. + required: false + type: str + target: + description: + - URL of the managed instance group that this autoscaler will scale. + required: true + type: str + region: + description: + - URL of the region where the instance group resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/regionAutoscalers)' +- 'Autoscaling Groups of Instances: U(https://cloud.google.com/compute/docs/autoscaler/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-instancetemplate + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a address + google.cloud.gcp_compute_address: + name: address-instancetemplate + region: us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: address + +- name: create a instance template + google.cloud.gcp_compute_instance_template: + name: "{{ resource_name }}" + properties: + disks: + - auto_delete: 'true' + boot: 'true' + initialize_params: + source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts + machine_type: n1-standard-1 + network_interfaces: + - network: "{{ network }}" + access_configs: + - name: test-config + type: ONE_TO_ONE_NAT + nat_ip: "{{ address }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancetemplate + +- name: create a region instance group manager + google.cloud.gcp_compute_region_instance_group_manager: + name: "{{ resource_name }}" + base_instance_name: test1-child + region: us-central1 + instance_template: "{{ instancetemplate }}" + target_size: 3 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: igrm + +- name: create a region autoscaler + google.cloud.gcp_compute_region_autoscaler: + name: my-region-autoscaler + region: us-central1 + autoscaling_policy: + min_num_replicas: 1 + max_num_replicas: 5 + cool_down_period_sec: 60 + cpu_utilization: + utilization_target: 0.5 + target: "{{igrm.selfLink}}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +id: + description: + - 
Unique identifier for the resource. + returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +name: + description: + - Name of the resource. The name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + a lowercase letter, and all following characters must be a dash, lowercase letter, + or digit, except the last character, which cannot be a dash. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +autoscalingPolicy: + description: + - 'The configuration parameters for the autoscaling algorithm. You can define one + or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, + and loadBalancingUtilization.' + - If none of these are specified, the default will be to autoscale based on cpuUtilization + to 0.6 or 60%. + returned: success + type: complex + contains: + minNumReplicas: + description: + - The minimum number of replicas that the autoscaler can scale down to. This + cannot be less than 0. If not provided, autoscaler will choose a default value + depending on maximum number of instances allowed. + returned: success + type: int + maxNumReplicas: + description: + - The maximum number of instances that the autoscaler can scale up to. This + is required when creating or updating an autoscaler. The maximum number of + replicas should not be lower than minimal number of replicas. + returned: success + type: int + coolDownPeriodSec: + description: + - The number of seconds that the autoscaler should wait before it starts collecting + information from a new instance. This prevents the autoscaler from collecting + information when the instance is initializing, during which the collected + usage would not be reliable. The default time autoscaler waits is 60 seconds. 
+ - Virtual machine initialization times might vary because of numerous factors. + We recommend that you test how long an instance may take to initialize. To + do this, create an instance and time the startup process. + returned: success + type: int + mode: + description: + - Defines operating mode for this policy. + returned: success + type: str + scaleInControl: + description: + - Defines scale in controls to reduce the risk of response latency and outages + due to abrupt scale-in events . + returned: success + type: complex + contains: + maxScaledInReplicas: + description: + - A nested object resource. + returned: success + type: complex + contains: + fixed: + description: + - Specifies a fixed number of VM instances. This must be a positive + integer. + returned: success + type: int + percent: + description: + - Specifies a percentage of instances between 0 to 100%, inclusive. + - For example, specify 80 for 80%. + returned: success + type: int + timeWindowSec: + description: + - How long back autoscaling should look when computing recommendations to + include directives regarding slower scale down, as described above. + returned: success + type: int + cpuUtilization: + description: + - Defines the CPU utilization policy that allows the autoscaler to scale based + on the average CPU utilization of a managed instance group. + returned: success + type: complex + contains: + utilizationTarget: + description: + - The target CPU utilization that the autoscaler should maintain. + - Must be a float value in the range (0, 1]. If not specified, the default + is 0.6. + - If the CPU level is below the target utilization, the autoscaler scales + down the number of instances until it reaches the minimum number of instances + you specified or until the average CPU of your instances reaches the target + utilization. 
+ - If the average CPU is above the target utilization, the autoscaler scales + up until it reaches the maximum number of instances you specified or until + the average utilization reaches the target utilization. + returned: success + type: str + predictiveMethod: + description: + - 'Indicates whether predictive autoscaling based on CPU metric is enabled. + Valid values are: - NONE (default). No predictive method is used. The + autoscaler scales the group to meet current demand based on real-time + metrics.' + - "- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability + by monitoring daily and weekly load patterns and scaling out ahead of + anticipated demand." + returned: success + type: str + customMetricUtilizations: + description: + - Configuration parameters of autoscaling based on a custom metric. + returned: success + type: complex + contains: + metric: + description: + - The identifier (type) of the Stackdriver Monitoring metric. + - The metric cannot have negative values. + - The metric must have a value type of INT64 or DOUBLE. + returned: success + type: str + utilizationTarget: + description: + - The target value of the metric that autoscaler should maintain. This must + be a positive value. A utilization metric scales number of virtual machines + handling requests to increase or decrease proportionally to the metric. + - For example, a good metric to use as a utilizationTarget is U(www.googleapis.com/compute/instance/network/received_bytes_count). + - The autoscaler will work to keep this value constant for each of the instances. + returned: success + type: str + utilizationTargetType: + description: + - Defines how target utilization value is expressed for a Stackdriver Monitoring + metric. + returned: success + type: str + loadBalancingUtilization: + description: + - Configuration parameters of autoscaling based on a load balancer. 
    returned: success
    type: complex
    contains:
      utilizationTarget:
        description:
        - Fraction of backend capacity utilization (set in HTTP(s) load balancing
          configuration) that autoscaler should maintain. Must be a positive float
          value. If not defined, the default is 0.8.
        returned: success
        type: str
target:
  description:
  - URL of the managed instance group that this autoscaler will scale.
  returned: success
  type: str
region:
  description:
  - URL of the region where the instance group resides.
  returned: success
  type: str
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
    navigate_hash,
    GcpSession,
    GcpModule,
    GcpRequest,
    remove_nones_from_dict,
    replace_resource_dict,
)
import json
import time

################################################################################
# Main
################################################################################


def main():
    """Main function: converge the RegionAutoscaler on the requested state."""

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            description=dict(type='str'),
            autoscaling_policy=dict(
                required=True,
                type='dict',
                options=dict(
                    min_num_replicas=dict(type='int'),
                    max_num_replicas=dict(required=True, type='int'),
                    cool_down_period_sec=dict(default=60, type='int'),
                    mode=dict(default='ON', type='str'),
                    scale_in_control=dict(
                        type='dict',
                        options=dict(
                            max_scaled_in_replicas=dict(type='dict', options=dict(fixed=dict(type='int'), percent=dict(type='int'))),
                            time_window_sec=dict(type='int'),
                        ),
                    ),
                    cpu_utilization=dict(type='dict', options=dict(utilization_target=dict(type='str'), predictive_method=dict(default='NONE', type='str'))),
                    custom_metric_utilizations=dict(
                        type='list',
                        elements='dict',
                        options=dict(metric=dict(required=True, type='str'), utilization_target=dict(type='str'), utilization_target_type=dict(type='str')),
                    ),
                    load_balancing_utilization=dict(type='dict', options=dict(utilization_target=dict(type='str'))),
                ),
            ),
            target=dict(required=True, type='str'),
            region=dict(required=True, type='str'),
        )
    )

    # Default to the compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#autoscaler'

    # Standard MMv1 state machine: fetch, then create/update/delete as needed.
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link, kind):
    """POST the autoscaler to the collection URL and wait for the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


def update(module, link, kind):
    """PUT the full resource to its self-link and wait for the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.put(link, resource_to_request(module)))


def delete(module, link, kind):
    """DELETE the autoscaler at its self-link and wait for the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))


def resource_to_request(module):
    """Build the API request body (camelCase keys) from the module params.

    Falsy values (None, '', {}, 0) are dropped; an explicit False is kept.
    """
    request = {
        u'kind': 'compute#autoscaler',
        u'region': module.params.get('region'),
        u'name': module.params.get('name'),
        u'description': module.params.get('description'),
        u'autoscalingPolicy': RegionAutoscalerAutoscalingpolicy(module.params.get('autoscaling_policy', {}), module).to_request(),
        u'target': module.params.get('target'),
    }
    # Keep only populated fields (False is a meaningful value and is kept).
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource at `link`; returns None on 404 when allow_not_found is True."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    """URL of this specific autoscaler (project/region/name from params)."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/autoscalers/{name}".format(**module.params)


def collection(module):
    """URL of the regional autoscalers collection (used for create)."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/autoscalers".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode a JSON API response; fail the module on HTTP or API-level errors."""
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        # Body was not valid JSON — surface the raw text for debugging.
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    # API-level errors arrive embedded in an otherwise-200 body.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True when the desired state (params) differs from the live resource.

    Only keys present on both sides are compared, which excludes output-only
    fields from the diff.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    return {
        u'id': response.get(u'id'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        # NOTE: name is taken from the module params, not the API response.
        u'name': module.params.get('name'),
        u'description': response.get(u'description'),
        u'autoscalingPolicy': RegionAutoscalerAutoscalingpolicy(response.get(u'autoscalingPolicy', {}), module).from_response(),
        u'target': response.get(u'target'),
    }


def async_op_url(module, extra_data=None):
    """URL for polling a regional operation; `op_id` is supplied via extra_data."""
    if extra_data is None:
        extra_data = {}
    url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    """Block until the async operation completes, then fetch the created/changed resource."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#autoscaler')


def wait_for_completion(status, op_result, module):
    # Poll the operation once per second until it reports DONE, failing fast
    # on any error block in the operation payload.
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    # Fail the module if the payload carries an error block at err_path.
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


class RegionAutoscalerAutoscalingpolicy(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'minNumReplicas': self.request.get('min_num_replicas'),
                u'maxNumReplicas': self.request.get('max_num_replicas'),
                u'coolDownPeriodSec':
self.request.get('cool_down_period_sec'), + u'mode': self.request.get('mode'), + u'scaleInControl': RegionAutoscalerScaleincontrol(self.request.get('scale_in_control', {}), self.module).to_request(), + u'cpuUtilization': RegionAutoscalerCpuutilization(self.request.get('cpu_utilization', {}), self.module).to_request(), + u'customMetricUtilizations': RegionAutoscalerCustommetricutilizationsArray( + self.request.get('custom_metric_utilizations', []), self.module + ).to_request(), + u'loadBalancingUtilization': RegionAutoscalerLoadbalancingutilization( + self.request.get('load_balancing_utilization', {}), self.module + ).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'minNumReplicas': self.request.get(u'minNumReplicas'), + u'maxNumReplicas': self.request.get(u'maxNumReplicas'), + u'coolDownPeriodSec': self.request.get(u'coolDownPeriodSec'), + u'mode': self.request.get(u'mode'), + u'scaleInControl': RegionAutoscalerScaleincontrol(self.request.get(u'scaleInControl', {}), self.module).from_response(), + u'cpuUtilization': RegionAutoscalerCpuutilization(self.request.get(u'cpuUtilization', {}), self.module).from_response(), + u'customMetricUtilizations': RegionAutoscalerCustommetricutilizationsArray( + self.request.get(u'customMetricUtilizations', []), self.module + ).from_response(), + u'loadBalancingUtilization': RegionAutoscalerLoadbalancingutilization( + self.request.get(u'loadBalancingUtilization', {}), self.module + ).from_response(), + } + ) + + +class RegionAutoscalerScaleincontrol(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'maxScaledInReplicas': RegionAutoscalerMaxscaledinreplicas(self.request.get('max_scaled_in_replicas', {}), self.module).to_request(), + u'timeWindowSec': self.request.get('time_window_sec'), + } + ) + + def from_response(self): + return 
remove_nones_from_dict( + { + u'maxScaledInReplicas': RegionAutoscalerMaxscaledinreplicas(self.request.get(u'maxScaledInReplicas', {}), self.module).from_response(), + u'timeWindowSec': self.request.get(u'timeWindowSec'), + } + ) + + +class RegionAutoscalerMaxscaledinreplicas(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'fixed': self.request.get('fixed'), u'percent': self.request.get('percent')}) + + def from_response(self): + return remove_nones_from_dict({u'fixed': self.request.get(u'fixed'), u'percent': self.request.get(u'percent')}) + + +class RegionAutoscalerCpuutilization(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'utilizationTarget': self.request.get('utilization_target'), u'predictiveMethod': self.request.get('predictive_method')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'utilizationTarget': self.request.get(u'utilizationTarget'), u'predictiveMethod': self.request.get(u'predictiveMethod')} + ) + + +class RegionAutoscalerCustommetricutilizationsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + {u'metric': item.get('metric'), u'utilizationTarget': item.get('utilization_target'), u'utilizationTargetType': item.get('utilization_target_type')} + ) + + def _response_from_item(self, item): + return 
remove_nones_from_dict( + {u'metric': item.get(u'metric'), u'utilizationTarget': item.get(u'utilizationTarget'), u'utilizationTargetType': item.get(u'utilizationTargetType')} + ) + + +class RegionAutoscalerLoadbalancingutilization(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'utilizationTarget': self.request.get('utilization_target')}) + + def from_response(self): + return remove_nones_from_dict({u'utilizationTarget': self.request.get(u'utilizationTarget')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_autoscaler_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_autoscaler_info.py new file mode 100644 index 000000000..f53968e28 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_autoscaler_info.py @@ -0,0 +1,374 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_autoscaler_info +description: +- Gather info for GCP RegionAutoscaler +short_description: Gather info for GCP RegionAutoscaler +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - URL of the region where the instance group resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a region autoscaler + gcp_compute_region_autoscaler_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + id: + description: + - Unique identifier for the resource. + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + name: + description: + - Name of the resource. 
The name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + autoscalingPolicy: + description: + - 'The configuration parameters for the autoscaling algorithm. You can define + one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, + and loadBalancingUtilization.' + - If none of these are specified, the default will be to autoscale based on + cpuUtilization to 0.6 or 60%. + returned: success + type: complex + contains: + minNumReplicas: + description: + - The minimum number of replicas that the autoscaler can scale down to. + This cannot be less than 0. If not provided, autoscaler will choose a + default value depending on maximum number of instances allowed. + returned: success + type: int + maxNumReplicas: + description: + - The maximum number of instances that the autoscaler can scale up to. This + is required when creating or updating an autoscaler. The maximum number + of replicas should not be lower than minimal number of replicas. + returned: success + type: int + coolDownPeriodSec: + description: + - The number of seconds that the autoscaler should wait before it starts + collecting information from a new instance. This prevents the autoscaler + from collecting information when the instance is initializing, during + which the collected usage would not be reliable. The default time autoscaler + waits is 60 seconds. + - Virtual machine initialization times might vary because of numerous factors. + We recommend that you test how long an instance may take to initialize. + To do this, create an instance and time the startup process. 
+ returned: success + type: int + mode: + description: + - Defines operating mode for this policy. + returned: success + type: str + scaleInControl: + description: + - Defines scale in controls to reduce the risk of response latency and outages + due to abrupt scale-in events . + returned: success + type: complex + contains: + maxScaledInReplicas: + description: + - A nested object resource. + returned: success + type: complex + contains: + fixed: + description: + - Specifies a fixed number of VM instances. This must be a positive + integer. + returned: success + type: int + percent: + description: + - Specifies a percentage of instances between 0 to 100%, inclusive. + - For example, specify 80 for 80%. + returned: success + type: int + timeWindowSec: + description: + - How long back autoscaling should look when computing recommendations + to include directives regarding slower scale down, as described above. + returned: success + type: int + cpuUtilization: + description: + - Defines the CPU utilization policy that allows the autoscaler to scale + based on the average CPU utilization of a managed instance group. + returned: success + type: complex + contains: + utilizationTarget: + description: + - The target CPU utilization that the autoscaler should maintain. + - Must be a float value in the range (0, 1]. If not specified, the default + is 0.6. + - If the CPU level is below the target utilization, the autoscaler scales + down the number of instances until it reaches the minimum number of + instances you specified or until the average CPU of your instances + reaches the target utilization. + - If the average CPU is above the target utilization, the autoscaler + scales up until it reaches the maximum number of instances you specified + or until the average utilization reaches the target utilization. + returned: success + type: str + predictiveMethod: + description: + - 'Indicates whether predictive autoscaling based on CPU metric is enabled. 
+ Valid values are: - NONE (default). No predictive method is used. + The autoscaler scales the group to meet current demand based on real-time + metrics.' + - "- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability + by monitoring daily and weekly load patterns and scaling out ahead + of anticipated demand." + returned: success + type: str + customMetricUtilizations: + description: + - Configuration parameters of autoscaling based on a custom metric. + returned: success + type: complex + contains: + metric: + description: + - The identifier (type) of the Stackdriver Monitoring metric. + - The metric cannot have negative values. + - The metric must have a value type of INT64 or DOUBLE. + returned: success + type: str + utilizationTarget: + description: + - The target value of the metric that autoscaler should maintain. This + must be a positive value. A utilization metric scales number of virtual + machines handling requests to increase or decrease proportionally + to the metric. + - For example, a good metric to use as a utilizationTarget is U(www.googleapis.com/compute/instance/network/received_bytes_count). + - The autoscaler will work to keep this value constant for each of the + instances. + returned: success + type: str + utilizationTargetType: + description: + - Defines how target utilization value is expressed for a Stackdriver + Monitoring metric. + returned: success + type: str + loadBalancingUtilization: + description: + - Configuration parameters of autoscaling based on a load balancer. + returned: success + type: complex + contains: + utilizationTarget: + description: + - Fraction of backend capacity utilization (set in HTTP(s) load balancing + configuration) that autoscaler should maintain. Must be a positive + float value. If not defined, the default is 0.8. + returned: success + type: str + target: + description: + - URL of the managed instance group that this autoscaler will scale. 
+ returned: success + type: str + region: + description: + - URL of the region where the instance group resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/autoscalers".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_backend_service.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_backend_service.py new file mode 100644 index 000000000..0ad1bcc5c --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_backend_service.py @@ -0,0 +1,2144 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_backend_service +description: +- A Region Backend Service defines a regionally-scoped group of virtual machines that + will serve traffic for load balancing. +short_description: Creates a GCP RegionBackendService +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + affinity_cookie_ttl_sec: + description: + - Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set + to 0, the cookie is non-persistent and lasts only until the end of the browser + session (or equivalent). The maximum allowed value for TTL is one day. + - When the load balancing scheme is INTERNAL, this field is not used. + required: false + type: int + backends: + description: + - The set of backends that serve this RegionBackendService. + elements: dict + required: false + type: list + suboptions: + balancing_mode: + description: + - Specifies the balancing mode for this backend. + - 'Some valid choices include: "UTILIZATION", "RATE", "CONNECTION"' + required: false + default: CONNECTION + type: str + capacity_scaler: + description: + - A multiplier applied to the group's maximum servicing capacity (based on + UTILIZATION, RATE or CONNECTION). 
+ - "~>**NOTE**: This field cannot be set for INTERNAL region backend services + (default loadBalancingScheme), but is required for non-INTERNAL backend + service. The total capacity_scaler for all backends must be non-zero." + - A setting of 0 means the group is completely drained, offering 0% of its + available Capacity. Valid range is [0.0,1.0]. + required: false + type: str + description: + description: + - An optional description of this resource. + - Provide this property when you create the resource. + required: false + type: str + failover: + description: + - This field designates whether this is a failover backend. More than one + failover backend can be configured for a given RegionBackendService. + required: false + type: bool + group: + description: + - The fully-qualified URL of an Instance Group or Network Endpoint Group resource. + In case of instance group this defines the list of instances that serve + traffic. Member virtual machine instances from each instance group must + live in the same zone as the instance group itself. No two backends in a + backend service are allowed to use same Instance Group resource. + - For Network Endpoint Groups this defines list of endpoints. All endpoints + of Network Endpoint Group must be hosted on instances located in the same + zone as the Network Endpoint Group. + - Backend services cannot mix Instance Group and Network Endpoint Group backends. + - When the `load_balancing_scheme` is INTERNAL, only instance groups are supported. + - Note that you must specify an Instance Group or Network Endpoint Group resource + using the fully-qualified URL, rather than a partial URL. + required: true + type: str + max_connections: + description: + - The max number of simultaneous connections for the group. Can be used with + either CONNECTION or UTILIZATION balancing modes. + - Cannot be set for INTERNAL backend services. 
+ - For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance + or maxConnectionsPerEndpoint, as appropriate for group type, must be set. + required: false + type: int + max_connections_per_instance: + description: + - The max number of simultaneous connections that a single backend instance + can handle. Cannot be set for INTERNAL backend services. + - This is used to calculate the capacity of the group. + - Can be used in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerInstance + must be set. + required: false + type: int + max_connections_per_endpoint: + description: + - The max number of simultaneous connections that a single backend network + endpoint can handle. Cannot be set for INTERNAL backend services. + - This is used to calculate the capacity of the group. Can be used in either + CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections + or maxConnectionsPerEndpoint must be set. + required: false + type: int + max_rate: + description: + - The max requests per second (RPS) of the group. Cannot be set for INTERNAL + backend services. + - Can be used with either RATE or UTILIZATION balancing modes, but required + if RATE mode. Either maxRate or one of maxRatePerInstance or maxRatePerEndpoint, + as appropriate for group type, must be set. + required: false + type: int + max_rate_per_instance: + description: + - The max requests per second (RPS) that a single backend instance can handle. + This is used to calculate the capacity of the group. Can be used in either + balancing mode. For RATE mode, either maxRate or maxRatePerInstance must + be set. Cannot be set for INTERNAL backend services. + required: false + type: str + max_rate_per_endpoint: + description: + - The max requests per second (RPS) that a single backend network endpoint + can handle. This is used to calculate the capacity of the group. Can be + used in either balancing mode. 
For RATE mode, either maxRate or maxRatePerEndpoint + must be set. Cannot be set for INTERNAL backend services. + required: false + type: str + max_utilization: + description: + - Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization + target for the group. Valid range is [0.0, 1.0]. + - Cannot be set for INTERNAL backend services. + required: false + type: str + circuit_breakers: + description: + - Settings controlling the volume of connections to a backend service. This field + is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + required: false + type: dict + suboptions: + max_requests_per_connection: + description: + - Maximum requests for a single backend connection. This parameter is respected + by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there + is no limit. Setting this parameter to 1 will effectively disable keep alive. + required: false + type: int + max_connections: + description: + - The maximum number of connections to the backend cluster. + - Defaults to 1024. + required: false + default: '1024' + type: int + max_pending_requests: + description: + - The maximum number of pending requests to the backend cluster. + - Defaults to 1024. + required: false + default: '1024' + type: int + max_requests: + description: + - The maximum number of parallel requests to the backend cluster. + - Defaults to 1024. + required: false + default: '1024' + type: int + max_retries: + description: + - The maximum number of parallel retries to the backend cluster. + - Defaults to 3. + required: false + default: '3' + type: int + consistent_hash: + description: + - Consistent Hash-based load balancing can be used to provide soft session affinity + based on HTTP headers, cookies or other properties. This load balancing policy + is applicable only for HTTP connections. 
The affinity to a particular destination + host will be lost when one or more hosts are added/removed from the destination + service. This field specifies parameters that control consistent hashing. + - This field only applies when all of the following are true - * `load_balancing_scheme` + is set to INTERNAL_MANAGED * `protocol` is set to HTTP, HTTPS, or HTTP2 * `locality_lb_policy` + is set to MAGLEV or RING_HASH . + required: false + type: dict + suboptions: + http_cookie: + description: + - Hash is based on HTTP Cookie. This field describes a HTTP cookie that will + be used as the hash key for the consistent hash load balancer. If the cookie + is not present, it will be generated. + - This field is applicable if the sessionAffinity is set to HTTP_COOKIE. + required: false + type: dict + suboptions: + ttl: + description: + - Lifetime of the cookie. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. + - Must be from 0 to 315,576,000,000 inclusive. + required: true + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + name: + description: + - Name of the cookie. + required: false + type: str + path: + description: + - Path to set for the cookie. + required: false + type: str + http_header_name: + description: + - The hash based on the value of the specified header field. + - This field is applicable if the sessionAffinity is set to HEADER_FIELD. + required: false + type: str + minimum_ring_size: + description: + - The minimum number of virtual nodes to use for the hash ring. + - Larger ring sizes result in more granular load distributions. If the number + of hosts in the load balancing pool is larger than the ring size, each host + will be assigned a single virtual node. 
+ - Defaults to 1024. + required: false + default: '1024' + type: int + cdn_policy: + description: + - Cloud CDN configuration for this BackendService. + required: false + type: dict + suboptions: + cache_key_policy: + description: + - The CacheKeyPolicy for this CdnPolicy. + required: false + type: dict + suboptions: + include_host: + description: + - If true requests to different hosts will be cached separately. + required: false + type: bool + include_protocol: + description: + - If true, http and https requests will be cached separately. + required: false + type: bool + include_query_string: + description: + - If true, include query string parameters in the cache key according + to query_string_whitelist and query_string_blacklist. If neither is + set, the entire query string will be included. + - If false, the query string will be excluded from the cache key entirely. + required: false + type: bool + query_string_blacklist: + description: + - Names of query string parameters to exclude in cache keys. + - All other parameters will be included. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + elements: str + required: false + type: list + query_string_whitelist: + description: + - Names of query string parameters to include in cache keys. + - All other parameters will be excluded. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + elements: str + required: false + type: list + signed_url_cache_max_age_sec: + description: + - Maximum number of seconds the response to a signed URL request will be considered + fresh, defaults to 1hr (3600s). After this time period, the response will + be revalidated before being served. 
+ - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: public, + max-age=[TTL]" header, regardless of any existing Cache-Control header. + The actual headers served in responses will not be altered.' + required: false + default: '3600' + type: int + default_ttl: + description: + - Specifies the default TTL for cached content served by this origin for responses + that do not have an existing valid TTL (max-age or s-max-age). + required: false + type: int + max_ttl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + required: false + type: int + client_ttl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + required: false + type: int + negative_caching: + description: + - Negative caching allows per-status code TTLs to be set, in order to apply + fine-grained caching for common errors or redirects. + required: false + type: bool + negative_caching_policy: + description: + - Sets a cache TTL for the specified HTTP status code. negativeCaching must + be enabled to configure negativeCachingPolicy. + - Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's + default cache TTLs. + elements: dict + required: false + type: list + suboptions: + code: + description: + - The HTTP status code to define a TTL against. Only HTTP status codes + 300, 301, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, + and you cannot specify a status code more than once. + required: false + type: int + cache_mode: + description: + - Specifies the cache setting for all responses from this backend. + - 'The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + .' 
+ - 'Some valid choices include: "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"' + required: false + type: str + serve_while_stale: + description: + - Serve existing content from the cache (if available) when revalidating content + with the origin, or when an error is encountered when refreshing the cache. + required: false + type: int + connection_draining: + description: + - Settings for connection draining . + required: false + type: dict + suboptions: + draining_timeout_sec: + description: + - Time for which instance will be drained (not accept new connections, but + still work to finish started). + required: false + default: '300' + type: int + description: + description: + - An optional description of this resource. + required: false + type: str + failover_policy: + description: + - Policy for failovers. + required: false + type: dict + suboptions: + disable_connection_drain_on_failover: + description: + - 'On failover or failback, this field indicates whether connection drain + will be honored. Setting this to true has the following effect: connections + to the old active pool are not drained. Connections to the new active pool + use the timeout of 10 min (currently fixed). Setting to false has the following + effect: both old and new connections will have a drain timeout of 10 min.' + - This can be set to true only if the protocol is TCP. + - The default is false. + required: false + type: bool + drop_traffic_if_unhealthy: + description: + - This option is used only when no healthy VMs are detected in the primary + and backup instance groups. When set to true, traffic is dropped. When set + to false, new connections are sent across all VMs in the primary group. + - The default is false. + required: false + type: bool + failover_ratio: + description: + - The value of the field must be in [0, 1]. 
If the ratio of the healthy VMs + in the primary backend is at or below this number, traffic arriving at the + load-balanced IP will be directed to the failover backend. + - In case where 'failoverRatio' is not set or all the VMs in the backup backend + are unhealthy, the traffic will be directed back to the primary backend + in the "force" mode, where traffic will be spread to the healthy VMs with + the best effort, or to all VMs when no VM is healthy. + - This field is only used with l4 load balancing. + required: false + type: str + enable_cdn: + description: + - If true, enable Cloud CDN for this RegionBackendService. + required: false + type: bool + health_checks: + description: + - The set of URLs to HealthCheck resources for health checking this RegionBackendService. + Currently at most one health check can be specified. + - A health check must be specified unless the backend service uses an internet + or serverless NEG as a backend. + elements: str + required: false + type: list + load_balancing_scheme: + description: + - Indicates what kind of load balancing this regional backend service will be + used for. A backend service created for one type of load balancing cannot be + used with the other(s). + - 'Some valid choices include: "EXTERNAL", "INTERNAL", "INTERNAL_MANAGED"' + required: false + default: INTERNAL + type: str + locality_lb_policy: + description: + - The load balancing algorithm used within the scope of the locality. + - The possible values are - * ROUND_ROBIN - This is a simple policy in which each + healthy backend is selected in round robin order. + - "* LEAST_REQUEST - An O(1) algorithm which selects two random healthy hosts + and picks the host which has fewer active requests." + - "* RING_HASH - The ring/modulo hash load balancer implements consistent hashing + to backends. The algorithm has the property that the addition/removal of a host + from a set of N hosts only affects 1/N of the requests." 
+ - "* RANDOM - The load balancer selects a random healthy host." + - "* ORIGINAL_DESTINATION - Backend host is selected based on the client connection + metadata, i.e., connections are opened to the same address as the destination + address of the incoming connection before the connection was redirected to the + load balancer." + - "* MAGLEV - used as a drop in replacement for the ring hash load balancer." + - Maglev is not as stable as ring hash but has faster table lookup build times + and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 + This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + - 'Some valid choices include: "ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", + "ORIGINAL_DESTINATION", "MAGLEV"' + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + outlier_detection: + description: + - Settings controlling eviction of unhealthy hosts from the load balancing pool. + - This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + required: false + type: dict + suboptions: + base_ejection_time: + description: + - The base time that a host is ejected for. The real time is equal to the + base time multiplied by the number of times the host has been ejected. Defaults + to 30000ms or 30s. 
+ required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` field + and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. + required: false + type: int + consecutive_errors: + description: + - Number of errors before a host is ejected from the connection pool. When + the backend host is accessed over HTTP, a 5xx return code qualifies as an + error. + - Defaults to 5. + required: false + default: '5' + type: int + consecutive_gateway_failure: + description: + - The number of consecutive gateway failures (502, 503, 504 status or connection + errors that are mapped to one of those status codes) before a consecutive + gateway failure ejection occurs. Defaults to 5. + required: false + default: '5' + type: int + enforcing_consecutive_errors: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive 5xx. This setting can be used to + disable ejection or to ramp it up slowly. Defaults to 100. + required: false + default: '100' + type: int + enforcing_consecutive_gateway_failure: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive gateway failures. This setting can + be used to disable ejection or to ramp it up slowly. Defaults to 0. + required: false + type: int + enforcing_success_rate: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through success rate statistics. This setting can be + used to disable ejection or to ramp it up slowly. Defaults to 100. 
+ required: false + default: '100' + type: int + interval: + description: + - Time interval between ejection sweep analysis. This can result in both new + ejections as well as hosts being returned to service. Defaults to 10 seconds. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` field + and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. + required: false + type: int + max_ejection_percent: + description: + - Maximum percentage of hosts in the load balancing pool for the backend service + that can be ejected. Defaults to 10%. + required: false + default: '10' + type: int + success_rate_minimum_hosts: + description: + - The number of hosts in a cluster that must have enough request volume to + detect success rate outliers. If the number of hosts is less than this setting, + outlier detection via success rate statistics is not performed for any host + in the cluster. Defaults to 5. + required: false + default: '5' + type: int + success_rate_request_volume: + description: + - The minimum number of total requests that must be collected in one interval + (as defined by the interval duration above) to include this host in success + rate based outlier detection. If the volume is lower than this setting, + outlier detection via success rate statistics is not performed for that + host. Defaults to 100. + required: false + default: '100' + type: int + success_rate_stdev_factor: + description: + - 'This factor is used to determine the ejection threshold for success rate + outlier ejection. 
The ejection threshold is the difference between the mean + success rate, and the product of this factor and the standard deviation + of the mean success rate: mean - (stdev * success_rate_stdev_factor). This + factor is divided by a thousand to get a double. That is, if the desired + factor is 1.9, the runtime value should be 1900. Defaults to 1900.' + required: false + default: '1900' + type: int + port_name: + description: + - A named port on a backend instance group representing the port for communication + to the backend VMs in that group. Required when the loadBalancingScheme is EXTERNAL, + INTERNAL_MANAGED, or INTERNAL_SELF_MANAGED and the backends are instance groups. + The named port must be defined on each backend instance group. This parameter + has no meaning if the backends are NEGs. API sets a default of "http" if not + given. + - Must be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load + Balancing). + required: false + type: str + protocol: + description: + - The protocol this RegionBackendService uses to communicate with backends. + - 'The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer + types and may result in errors if used with the GA API.' + - 'Some valid choices include: "HTTP", "HTTPS", "HTTP2", "SSL", "TCP", "UDP", + "GRPC"' + required: false + type: str + session_affinity: + description: + - Type of session affinity to use. The default is NONE. Session affinity is not + applicable if the protocol is UDP. + - 'Some valid choices include: "NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", + "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE"' + required: false + type: str + timeout_sec: + description: + - How many seconds to wait for the backend before considering it a failed request. + Default is 30 seconds. Valid range is [1, 86400]. 
+ required: false + type: int + log_config: + description: + - This field denotes the logging options for the load balancer traffic served + by this backend service. + - If logging is enabled, logs will be exported to Stackdriver. + required: false + type: dict + suboptions: + enable: + description: + - Whether to enable logging for the load balancer traffic served by this backend + service. + required: false + type: bool + sample_rate: + description: + - This field can only be specified if logging is enabled for this backend + service. The value of the field must be in [0, 1]. This configures the sampling + rate of requests to the load balancer where 1.0 means all logged requests + are reported and 0.0 means no logged requests are reported. + - The default value is 1.0. + required: false + type: str + network: + description: + - The URL of the network to which this backend service belongs. + - This field can only be specified when the load balancing scheme is set to INTERNAL. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: false + type: dict + region: + description: + - A reference to the region where the regional backend service resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/latest/regionBackendServices)' +- 'Internal TCP/UDP Load Balancing: U(https://cloud.google.com/compute/docs/load-balancing/internal/)' +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a health check + google.cloud.gcp_compute_health_check: + name: "{{ resource_name }}" + type: TCP + tcp_health_check: + port: 80 + check_interval_sec: 1 + timeout_sec: 1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a region backend service + google.cloud.gcp_compute_region_backend_service: + name: test-object + region: us-central1 + health_checks: + - "{{ healthcheck.selfLink }}" + connection_draining: + draining_timeout_sec: 10 + session_affinity: CLIENT_IP + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +affinityCookieTtlSec: + description: + - Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set + to 0, the cookie is non-persistent and lasts only until the end of the browser + session (or equivalent). The maximum allowed value for TTL is one day. + - When the load balancing scheme is INTERNAL, this field is not used. + returned: success + type: int +backends: + description: + - The set of backends that serve this RegionBackendService. + returned: success + type: complex + contains: + balancingMode: + description: + - Specifies the balancing mode for this backend. + returned: success + type: str + capacityScaler: + description: + - A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, + RATE or CONNECTION). + - "~>**NOTE**: This field cannot be set for INTERNAL region backend services + (default loadBalancingScheme), but is required for non-INTERNAL backend service. + The total capacity_scaler for all backends must be non-zero." + - A setting of 0 means the group is completely drained, offering 0% of its available + capacity. Valid range is [0.0,1.0]. + returned: success + type: str + description: + description: + - An optional description of this resource. 
+ - Provide this property when you create the resource. + returned: success + type: str + failover: + description: + - This field designates whether this is a failover backend. More than one failover + backend can be configured for a given RegionBackendService. + returned: success + type: bool + group: + description: + - The fully-qualified URL of an Instance Group or Network Endpoint Group resource. + In case of instance group this defines the list of instances that serve traffic. + Member virtual machine instances from each instance group must live in the + same zone as the instance group itself. No two backends in a backend service + are allowed to use the same Instance Group resource. + - For Network Endpoint Groups this defines the list of endpoints. All endpoints + of Network Endpoint Group must be hosted on instances located in the same + zone as the Network Endpoint Group. + - Backend services cannot mix Instance Group and Network Endpoint Group backends. + - When the `load_balancing_scheme` is INTERNAL, only instance groups are supported. + - Note that you must specify an Instance Group or Network Endpoint Group resource + using the fully-qualified URL, rather than a partial URL. + returned: success + type: str + maxConnections: + description: + - The max number of simultaneous connections for the group. Can be used with + either CONNECTION or UTILIZATION balancing modes. + - Cannot be set for INTERNAL backend services. + - For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance + or maxConnectionsPerEndpoint, as appropriate for group type, must be set. + returned: success + type: int + maxConnectionsPerInstance: + description: + - The max number of simultaneous connections that a single backend instance + can handle. Cannot be set for INTERNAL backend services. + - This is used to calculate the capacity of the group. + - Can be used in either CONNECTION or UTILIZATION balancing modes. 
+ - For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must + be set. + returned: success + type: int + maxConnectionsPerEndpoint: + description: + - The max number of simultaneous connections that a single backend network endpoint + can handle. Cannot be set for INTERNAL backend services. + - This is used to calculate the capacity of the group. Can be used in either + CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections + or maxConnectionsPerEndpoint must be set. + returned: success + type: int + maxRate: + description: + - The max requests per second (RPS) of the group. Cannot be set for INTERNAL + backend services. + - Can be used with either RATE or UTILIZATION balancing modes, but required + if RATE mode. Either maxRate or one of maxRatePerInstance or maxRatePerEndpoint, + as appropriate for group type, must be set. + returned: success + type: int + maxRatePerInstance: + description: + - The max requests per second (RPS) that a single backend instance can handle. + This is used to calculate the capacity of the group. Can be used in either + balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be + set. Cannot be set for INTERNAL backend services. + returned: success + type: str + maxRatePerEndpoint: + description: + - The max requests per second (RPS) that a single backend network endpoint can + handle. This is used to calculate the capacity of the group. Can be used in + either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint + must be set. Cannot be set for INTERNAL backend services. + returned: success + type: str + maxUtilization: + description: + - Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization + target for the group. Valid range is [0.0, 1.0]. + - Cannot be set for INTERNAL backend services. + returned: success + type: str +circuitBreakers: + description: + - Settings controlling the volume of connections to a backend service. 
This field + is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + returned: success + type: complex + contains: + maxRequestsPerConnection: + description: + - Maximum requests for a single backend connection. This parameter is respected + by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there is + no limit. Setting this parameter to 1 will effectively disable keep alive. + returned: success + type: int + maxConnections: + description: + - The maximum number of connections to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxPendingRequests: + description: + - The maximum number of pending requests to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxRequests: + description: + - The maximum number of parallel requests to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxRetries: + description: + - The maximum number of parallel retries to the backend cluster. + - Defaults to 3. + returned: success + type: int +consistentHash: + description: + - Consistent Hash-based load balancing can be used to provide soft session affinity + based on HTTP headers, cookies or other properties. This load balancing policy + is applicable only for HTTP connections. The affinity to a particular destination + host will be lost when one or more hosts are added/removed from the destination + service. This field specifies parameters that control consistent hashing. + - This field only applies when all of the following are true - * `load_balancing_scheme` + is set to INTERNAL_MANAGED * `protocol` is set to HTTP, HTTPS, or HTTP2 * `locality_lb_policy` + is set to MAGLEV or RING_HASH . + returned: success + type: complex + contains: + httpCookie: + description: + - Hash is based on HTTP Cookie. This field describes a HTTP cookie that will + be used as the hash key for the consistent hash load balancer. 
If the cookie + is not present, it will be generated. + - This field is applicable if the sessionAffinity is set to HTTP_COOKIE. + returned: success + type: complex + contains: + ttl: + description: + - Lifetime of the cookie. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. + - Must be from 0 to 315,576,000,000 inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds field + and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + name: + description: + - Name of the cookie. + returned: success + type: str + path: + description: + - Path to set for the cookie. + returned: success + type: str + httpHeaderName: + description: + - The hash based on the value of the specified header field. + - This field is applicable if the sessionAffinity is set to HEADER_FIELD. + returned: success + type: str + minimumRingSize: + description: + - The minimum number of virtual nodes to use for the hash ring. + - Larger ring sizes result in more granular load distributions. If the number + of hosts in the load balancing pool is larger than the ring size, each host + will be assigned a single virtual node. + - Defaults to 1024. + returned: success + type: int +cdnPolicy: + description: + - Cloud CDN configuration for this BackendService. + returned: success + type: complex + contains: + cacheKeyPolicy: + description: + - The CacheKeyPolicy for this CdnPolicy. + returned: success + type: complex + contains: + includeHost: + description: + - If true requests to different hosts will be cached separately. + returned: success + type: bool + includeProtocol: + description: + - If true, http and https requests will be cached separately. 
+ returned: success + type: bool + includeQueryString: + description: + - If true, include query string parameters in the cache key according to + query_string_whitelist and query_string_blacklist. If neither is set, + the entire query string will be included. + - If false, the query string will be excluded from the cache key entirely. + returned: success + type: bool + queryStringBlacklist: + description: + - Names of query string parameters to exclude in cache keys. + - All other parameters will be included. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + returned: success + type: list + queryStringWhitelist: + description: + - Names of query string parameters to include in cache keys. + - All other parameters will be excluded. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + returned: success + type: list + signedUrlCacheMaxAgeSec: + description: + - Maximum number of seconds the response to a signed URL request will be considered + fresh, defaults to 1hr (3600s). After this time period, the response will + be revalidated before being served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: public, + max-age=[TTL]" header, regardless of any existing Cache-Control header. The + actual headers served in responses will not be altered.' + returned: success + type: int + defaultTtl: + description: + - Specifies the default TTL for cached content served by this origin for responses + that do not have an existing valid TTL (max-age or s-max-age). + returned: success + type: int + maxTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. 
+ returned: success + type: int + clientTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + returned: success + type: int + negativeCaching: + description: + - Negative caching allows per-status code TTLs to be set, in order to apply + fine-grained caching for common errors or redirects. + returned: success + type: bool + negativeCachingPolicy: + description: + - Sets a cache TTL for the specified HTTP status code. negativeCaching must + be enabled to configure negativeCachingPolicy. + - Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's + default cache TTLs. + returned: success + type: complex + contains: + code: + description: + - The HTTP status code to define a TTL against. Only HTTP status codes 300, + 301, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, + and you cannot specify a status code more than once. + returned: success + type: int + cacheMode: + description: + - Specifies the cache setting for all responses from this backend. + - 'The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + .' + returned: success + type: str + serveWhileStale: + description: + - Serve existing content from the cache (if available) when revalidating content + with the origin, or when an error is encountered when refreshing the cache. + returned: success + type: int +connectionDraining: + description: + - Settings for connection draining . + returned: success + type: complex + contains: + drainingTimeoutSec: + description: + - Time for which instance will be drained (not accept new connections, but still + work to finish started). + returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +failoverPolicy: + description: + - Policy for failovers. 
+ returned: success + type: complex + contains: + disableConnectionDrainOnFailover: + description: + - 'On failover or failback, this field indicates whether connection drain will + be honored. Setting this to true has the following effect: connections to + the old active pool are not drained. Connections to the new active pool use + the timeout of 10 min (currently fixed). Setting to false has the following + effect: both old and new connections will have a drain timeout of 10 min.' + - This can be set to true only if the protocol is TCP. + - The default is false. + returned: success + type: bool + dropTrafficIfUnhealthy: + description: + - This option is used only when no healthy VMs are detected in the primary and + backup instance groups. When set to true, traffic is dropped. When set to + false, new connections are sent across all VMs in the primary group. + - The default is false. + returned: success + type: bool + failoverRatio: + description: + - The value of the field must be in [0, 1]. If the ratio of the healthy VMs + in the primary backend is at or below this number, traffic arriving at the + load-balanced IP will be directed to the failover backend. + - In case where 'failoverRatio' is not set or all the VMs in the backup backend + are unhealthy, the traffic will be directed back to the primary backend in + the "force" mode, where traffic will be spread to the healthy VMs with the + best effort, or to all VMs when no VM is healthy. + - This field is only used with l4 load balancing. + returned: success + type: str +enableCDN: + description: + - If true, enable Cloud CDN for this RegionBackendService. + returned: success + type: bool +fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. This + field is used in optimistic locking. + returned: success + type: str +healthChecks: + description: + - The set of URLs to HealthCheck resources for health checking this RegionBackendService. 
+ Currently at most one health check can be specified. + - A health check must be specified unless the backend service uses an internet or + serverless NEG as a backend. + returned: success + type: list +id: + description: + - The unique identifier for the resource. + returned: success + type: int +loadBalancingScheme: + description: + - Indicates what kind of load balancing this regional backend service will be used + for. A backend service created for one type of load balancing cannot be used with + the other(s). + returned: success + type: str +localityLbPolicy: + description: + - The load balancing algorithm used within the scope of the locality. + - The possible values are - * ROUND_ROBIN - This is a simple policy in which each + healthy backend is selected in round robin order. + - "* LEAST_REQUEST - An O(1) algorithm which selects two random healthy hosts and + picks the host which has fewer active requests." + - "* RING_HASH - The ring/modulo hash load balancer implements consistent hashing + to backends. The algorithm has the property that the addition/removal of a host + from a set of N hosts only affects 1/N of the requests." + - "* RANDOM - The load balancer selects a random healthy host." + - "* ORIGINAL_DESTINATION - Backend host is selected based on the client connection + metadata, i.e., connections are opened to the same address as the destination + address of the incoming connection before the connection was redirected to the + load balancer." + - "* MAGLEV - used as a drop in replacement for the ring hash load balancer." + - Maglev is not as stable as ring hash but has faster table lookup build times and + host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 + This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + returned: success + type: str +name: + description: + - Name of the resource. 
Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +outlierDetection: + description: + - Settings controlling eviction of unhealthy hosts from the load balancing pool. + - This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + returned: success + type: complex + contains: + baseEjectionTime: + description: + - The base time that a host is ejected for. The real time is equal to the base + time multiplied by the number of times the host has been ejected. Defaults + to 30000ms or 30s. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + consecutiveErrors: + description: + - Number of errors before a host is ejected from the connection pool. When the + backend host is accessed over HTTP, a 5xx return code qualifies as an error. + - Defaults to 5. + returned: success + type: int + consecutiveGatewayFailure: + description: + - The number of consecutive gateway failures (502, 503, 504 status or connection + errors that are mapped to one of those status codes) before a consecutive + gateway failure ejection occurs. Defaults to 5. 
+ returned: success + type: int + enforcingConsecutiveErrors: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive 5xx. This setting can be used to disable + ejection or to ramp it up slowly. Defaults to 100. + returned: success + type: int + enforcingConsecutiveGatewayFailure: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive gateway failures. This setting can + be used to disable ejection or to ramp it up slowly. Defaults to 0. + returned: success + type: int + enforcingSuccessRate: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through success rate statistics. This setting can be used + to disable ejection or to ramp it up slowly. Defaults to 100. + returned: success + type: int + interval: + description: + - Time interval between ejection sweep analysis. This can result in both new + ejections as well as hosts being returned to service. Defaults to 10 seconds. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + maxEjectionPercent: + description: + - Maximum percentage of hosts in the load balancing pool for the backend service + that can be ejected. Defaults to 10%. + returned: success + type: int + successRateMinimumHosts: + description: + - The number of hosts in a cluster that must have enough request volume to detect + success rate outliers. 
If the number of hosts is less than this setting, outlier + detection via success rate statistics is not performed for any host in the + cluster. Defaults to 5. + returned: success + type: int + successRateRequestVolume: + description: + - The minimum number of total requests that must be collected in one interval + (as defined by the interval duration above) to include this host in success + rate based outlier detection. If the volume is lower than this setting, outlier + detection via success rate statistics is not performed for that host. Defaults + to 100. + returned: success + type: int + successRateStdevFactor: + description: + - 'This factor is used to determine the ejection threshold for success rate + outlier ejection. The ejection threshold is the difference between the mean + success rate, and the product of this factor and the standard deviation of + the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor + is divided by a thousand to get a double. That is, if the desired factor is + 1.9, the runtime value should be 1900. Defaults to 1900.' + returned: success + type: int +portName: + description: + - A named port on a backend instance group representing the port for communication + to the backend VMs in that group. Required when the loadBalancingScheme is EXTERNAL, + INTERNAL_MANAGED, or INTERNAL_SELF_MANAGED and the backends are instance groups. + The named port must be defined on each backend instance group. This parameter + has no meaning if the backends are NEGs. API sets a default of "http" if not given. + - Must be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load + Balancing). + returned: success + type: str +protocol: + description: + - The protocol this RegionBackendService uses to communicate with backends. + - 'The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer + types and may result in errors if used with the GA API.' 
+ returned: success + type: str +sessionAffinity: + description: + - Type of session affinity to use. The default is NONE. Session affinity is not + applicable if the protocol is UDP. + returned: success + type: str +timeoutSec: + description: + - How many seconds to wait for the backend before considering it a failed request. + Default is 30 seconds. Valid range is [1, 86400]. + returned: success + type: int +logConfig: + description: + - This field denotes the logging options for the load balancer traffic served by + this backend service. + - If logging is enabled, logs will be exported to Stackdriver. + returned: success + type: complex + contains: + enable: + description: + - Whether to enable logging for the load balancer traffic served by this backend + service. + returned: success + type: bool + sampleRate: + description: + - This field can only be specified if logging is enabled for this backend service. + The value of the field must be in [0, 1]. This configures the sampling rate + of requests to the load balancer where 1.0 means all logged requests are reported + and 0.0 means no logged requests are reported. + - The default value is 1.0. + returned: success + type: str +network: + description: + - The URL of the network to which this backend service belongs. + - This field can only be specified when the load balancing scheme is set to INTERNAL. + returned: success + type: dict +region: + description: + - A reference to the region where the regional backend service resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + affinity_cookie_ttl_sec=dict(type='int'), + backends=dict( + type='list', + elements='dict', + options=dict( + balancing_mode=dict(default='CONNECTION', type='str'), + capacity_scaler=dict(type='str'), + description=dict(type='str'), + failover=dict(type='bool'), + group=dict(required=True, type='str'), + max_connections=dict(type='int'), + max_connections_per_instance=dict(type='int'), + max_connections_per_endpoint=dict(type='int'), + max_rate=dict(type='int'), + max_rate_per_instance=dict(type='str'), + max_rate_per_endpoint=dict(type='str'), + max_utilization=dict(type='str'), + ), + ), + circuit_breakers=dict( + type='dict', + options=dict( + max_requests_per_connection=dict(type='int'), + max_connections=dict(default=1024, type='int'), + max_pending_requests=dict(default=1024, type='int'), + max_requests=dict(default=1024, type='int'), + max_retries=dict(default=3, type='int'), + ), + ), + consistent_hash=dict( + type='dict', + options=dict( + http_cookie=dict( + type='dict', + options=dict( + ttl=dict(type='dict', options=dict(seconds=dict(required=True, type='int'), nanos=dict(type='int'))), + name=dict(type='str'), + path=dict(type='str'), + ), + ), + http_header_name=dict(type='str'), + 
minimum_ring_size=dict(default=1024, type='int'), + ), + ), + cdn_policy=dict( + type='dict', + options=dict( + cache_key_policy=dict( + type='dict', + options=dict( + include_host=dict(type='bool'), + include_protocol=dict(type='bool'), + include_query_string=dict(type='bool'), + query_string_blacklist=dict(type='list', elements='str'), + query_string_whitelist=dict(type='list', elements='str'), + ), + ), + signed_url_cache_max_age_sec=dict(default=3600, type='int'), + default_ttl=dict(type='int'), + max_ttl=dict(type='int'), + client_ttl=dict(type='int'), + negative_caching=dict(type='bool'), + negative_caching_policy=dict(type='list', elements='dict', options=dict(code=dict(type='int'))), + cache_mode=dict(type='str'), + serve_while_stale=dict(type='int'), + ), + ), + connection_draining=dict(type='dict', options=dict(draining_timeout_sec=dict(default=300, type='int'))), + description=dict(type='str'), + failover_policy=dict( + type='dict', + options=dict( + disable_connection_drain_on_failover=dict(type='bool'), drop_traffic_if_unhealthy=dict(type='bool'), failover_ratio=dict(type='str') + ), + ), + enable_cdn=dict(type='bool'), + health_checks=dict(type='list', elements='str'), + load_balancing_scheme=dict(default='INTERNAL', type='str'), + locality_lb_policy=dict(type='str'), + name=dict(required=True, type='str'), + outlier_detection=dict( + type='dict', + options=dict( + base_ejection_time=dict(type='dict', options=dict(seconds=dict(required=True, type='int'), nanos=dict(type='int'))), + consecutive_errors=dict(default=5, type='int'), + consecutive_gateway_failure=dict(default=5, type='int'), + enforcing_consecutive_errors=dict(default=100, type='int'), + enforcing_consecutive_gateway_failure=dict(default=0, type='int'), + enforcing_success_rate=dict(default=100, type='int'), + interval=dict(type='dict', options=dict(seconds=dict(required=True, type='int'), nanos=dict(type='int'))), + max_ejection_percent=dict(default=10, type='int'), + 
success_rate_minimum_hosts=dict(default=5, type='int'), + success_rate_request_volume=dict(default=100, type='int'), + success_rate_stdev_factor=dict(default=1900, type='int'), + ), + ), + port_name=dict(type='str'), + protocol=dict(type='str'), + session_affinity=dict(type='str'), + timeout_sec=dict(type='int'), + log_config=dict(type='dict', options=dict(enable=dict(type='bool'), sample_rate=dict(type='str'))), + network=dict(type='dict'), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#backendService' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#backendService', + u'affinityCookieTtlSec': module.params.get('affinity_cookie_ttl_sec'), + u'backends': RegionBackendServiceBackendsArray(module.params.get('backends', []), module).to_request(), + u'circuitBreakers': 
RegionBackendServiceCircuitbreakers(module.params.get('circuit_breakers', {}), module).to_request(), + u'consistentHash': RegionBackendServiceConsistenthash(module.params.get('consistent_hash', {}), module).to_request(), + u'cdnPolicy': RegionBackendServiceCdnpolicy(module.params.get('cdn_policy', {}), module).to_request(), + u'connectionDraining': RegionBackendServiceConnectiondraining(module.params.get('connection_draining', {}), module).to_request(), + u'description': module.params.get('description'), + u'failoverPolicy': RegionBackendServiceFailoverpolicy(module.params.get('failover_policy', {}), module).to_request(), + u'enableCDN': module.params.get('enable_cdn'), + u'healthChecks': module.params.get('health_checks'), + u'loadBalancingScheme': module.params.get('load_balancing_scheme'), + u'localityLbPolicy': module.params.get('locality_lb_policy'), + u'name': module.params.get('name'), + u'outlierDetection': RegionBackendServiceOutlierdetection(module.params.get('outlier_detection', {}), module).to_request(), + u'portName': module.params.get('port_name'), + u'protocol': module.params.get('protocol'), + u'sessionAffinity': module.params.get('session_affinity'), + u'timeoutSec': module.params.get('timeout_sec'), + u'logConfig': RegionBackendServiceLogconfig(module.params.get('log_config', {}), module).to_request(), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{name}".format(**module.params) + + +def collection(module): + return 
"https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'affinityCookieTtlSec': response.get(u'affinityCookieTtlSec'), + u'backends': RegionBackendServiceBackendsArray(response.get(u'backends', []), module).from_response(), + u'circuitBreakers': RegionBackendServiceCircuitbreakers(response.get(u'circuitBreakers', {}), module).from_response(), + u'consistentHash': RegionBackendServiceConsistenthash(response.get(u'consistentHash', {}), module).from_response(), + u'cdnPolicy': RegionBackendServiceCdnpolicy(response.get(u'cdnPolicy', {}), module).from_response(), + u'connectionDraining': RegionBackendServiceConnectiondraining(response.get(u'connectionDraining', {}), module).from_response(), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'failoverPolicy': RegionBackendServiceFailoverpolicy(response.get(u'failoverPolicy', {}), module).from_response(), + u'enableCDN': response.get(u'enableCDN'), + u'fingerprint': response.get(u'fingerprint'), + u'healthChecks': response.get(u'healthChecks'), + u'id': response.get(u'id'), + u'loadBalancingScheme': module.params.get('load_balancing_scheme'), + u'localityLbPolicy': response.get(u'localityLbPolicy'), + u'name': module.params.get('name'), + u'outlierDetection': RegionBackendServiceOutlierdetection(response.get(u'outlierDetection', {}), module).from_response(), + u'portName': response.get(u'portName'), + u'protocol': response.get(u'protocol'), + u'sessionAffinity': response.get(u'sessionAffinity'), + u'timeoutSec': response.get(u'timeoutSec'), + u'logConfig': RegionBackendServiceLogconfig(response.get(u'logConfig', {}), module).from_response(), + u'network': response.get(u'network'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return 
url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#backendService') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class RegionBackendServiceBackendsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'balancingMode': item.get('balancing_mode'), + u'capacityScaler': item.get('capacity_scaler'), + u'description': item.get('description'), + u'failover': item.get('failover'), + u'group': item.get('group'), + u'maxConnections': item.get('max_connections'), + u'maxConnectionsPerInstance': item.get('max_connections_per_instance'), + u'maxConnectionsPerEndpoint': item.get('max_connections_per_endpoint'), + u'maxRate': item.get('max_rate'), + u'maxRatePerInstance': item.get('max_rate_per_instance'), + u'maxRatePerEndpoint': 
item.get('max_rate_per_endpoint'), + u'maxUtilization': item.get('max_utilization'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'balancingMode': item.get(u'balancingMode'), + u'capacityScaler': item.get(u'capacityScaler'), + u'description': item.get(u'description'), + u'failover': item.get(u'failover'), + u'group': item.get(u'group'), + u'maxConnections': item.get(u'maxConnections'), + u'maxConnectionsPerInstance': item.get(u'maxConnectionsPerInstance'), + u'maxConnectionsPerEndpoint': item.get(u'maxConnectionsPerEndpoint'), + u'maxRate': item.get(u'maxRate'), + u'maxRatePerInstance': item.get(u'maxRatePerInstance'), + u'maxRatePerEndpoint': item.get(u'maxRatePerEndpoint'), + u'maxUtilization': item.get(u'maxUtilization'), + } + ) + + +class RegionBackendServiceCircuitbreakers(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'maxRequestsPerConnection': self.request.get('max_requests_per_connection'), + u'maxConnections': self.request.get('max_connections'), + u'maxPendingRequests': self.request.get('max_pending_requests'), + u'maxRequests': self.request.get('max_requests'), + u'maxRetries': self.request.get('max_retries'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'maxRequestsPerConnection': self.request.get(u'maxRequestsPerConnection'), + u'maxConnections': self.request.get(u'maxConnections'), + u'maxPendingRequests': self.request.get(u'maxPendingRequests'), + u'maxRequests': self.request.get(u'maxRequests'), + u'maxRetries': self.request.get(u'maxRetries'), + } + ) + + +class RegionBackendServiceConsistenthash(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'httpCookie': 
RegionBackendServiceHttpcookie(self.request.get('http_cookie', {}), self.module).to_request(), + u'httpHeaderName': self.request.get('http_header_name'), + u'minimumRingSize': self.request.get('minimum_ring_size'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'httpCookie': RegionBackendServiceHttpcookie(self.request.get(u'httpCookie', {}), self.module).from_response(), + u'httpHeaderName': self.request.get(u'httpHeaderName'), + u'minimumRingSize': self.request.get(u'minimumRingSize'), + } + ) + + +class RegionBackendServiceHttpcookie(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'ttl': RegionBackendServiceTtl(self.request.get('ttl', {}), self.module).to_request(), + u'name': self.request.get('name'), + u'path': self.request.get('path'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'ttl': RegionBackendServiceTtl(self.request.get(u'ttl', {}), self.module).from_response(), + u'name': self.request.get(u'name'), + u'path': self.request.get(u'path'), + } + ) + + +class RegionBackendServiceTtl(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'seconds': self.request.get('seconds'), u'nanos': self.request.get('nanos')}) + + def from_response(self): + return remove_nones_from_dict({u'seconds': self.request.get(u'seconds'), u'nanos': self.request.get(u'nanos')}) + + +class RegionBackendServiceCdnpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'cacheKeyPolicy': RegionBackendServiceCachekeypolicy(self.request.get('cache_key_policy', {}), 
self.module).to_request(), + u'signedUrlCacheMaxAgeSec': self.request.get('signed_url_cache_max_age_sec'), + u'defaultTtl': self.request.get('default_ttl'), + u'maxTtl': self.request.get('max_ttl'), + u'clientTtl': self.request.get('client_ttl'), + u'negativeCaching': self.request.get('negative_caching'), + u'negativeCachingPolicy': RegionBackendServiceNegativecachingpolicyArray( + self.request.get('negative_caching_policy', []), self.module + ).to_request(), + u'cacheMode': self.request.get('cache_mode'), + u'serveWhileStale': self.request.get('serve_while_stale'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'cacheKeyPolicy': RegionBackendServiceCachekeypolicy(self.request.get(u'cacheKeyPolicy', {}), self.module).from_response(), + u'signedUrlCacheMaxAgeSec': self.request.get(u'signedUrlCacheMaxAgeSec'), + u'defaultTtl': self.request.get(u'defaultTtl'), + u'maxTtl': self.request.get(u'maxTtl'), + u'clientTtl': self.request.get(u'clientTtl'), + u'negativeCaching': self.request.get(u'negativeCaching'), + u'negativeCachingPolicy': RegionBackendServiceNegativecachingpolicyArray( + self.request.get(u'negativeCachingPolicy', []), self.module + ).from_response(), + u'cacheMode': self.request.get(u'cacheMode'), + u'serveWhileStale': self.request.get(u'serveWhileStale'), + } + ) + + +class RegionBackendServiceCachekeypolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'includeHost': self.request.get('include_host'), + u'includeProtocol': self.request.get('include_protocol'), + u'includeQueryString': self.request.get('include_query_string'), + u'queryStringBlacklist': self.request.get('query_string_blacklist'), + u'queryStringWhitelist': self.request.get('query_string_whitelist'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'includeHost': 
class RegionBackendServiceNegativecachingpolicyArray(object):
    """Maps the negativeCachingPolicy list between Ansible and API wire form."""

    def __init__(self, request, module):
        self.module = module
        # Normalize a missing/empty value to an empty list.
        self.request = request or []

    def to_request(self):
        # Ansible -> API: one dict per negative-caching policy entry.
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        # API -> Ansible: symmetric to to_request().
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'code': item.get('code')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'code': item.get(u'code')})


class RegionBackendServiceConnectiondraining(object):
    """Maps the connectionDraining sub-object between Ansible and API form."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({u'drainingTimeoutSec': self.request.get('draining_timeout_sec')})

    def from_response(self):
        return remove_nones_from_dict({u'drainingTimeoutSec': self.request.get(u'drainingTimeoutSec')})


class RegionBackendServiceFailoverpolicy(object):
    """Maps the failoverPolicy sub-object between Ansible and API form."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # snake_case module params -> camelCase API fields.
        payload = {
            u'disableConnectionDrainOnFailover': self.request.get('disable_connection_drain_on_failover'),
            u'dropTrafficIfUnhealthy': self.request.get('drop_traffic_if_unhealthy'),
            u'failoverRatio': self.request.get('failover_ratio'),
        }
        return remove_nones_from_dict(payload)

    def from_response(self):
        payload = {
            u'disableConnectionDrainOnFailover': self.request.get(u'disableConnectionDrainOnFailover'),
            u'dropTrafficIfUnhealthy': self.request.get(u'dropTrafficIfUnhealthy'),
            u'failoverRatio': self.request.get(u'failoverRatio'),
        }
        return remove_nones_from_dict(payload)


class RegionBackendServiceOutlierdetection(object):
    """Maps the outlierDetection sub-object between Ansible and API form.

    Delegates the nested Duration sub-objects (baseEjectionTime, interval)
    to their own serializer classes.
    """

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        payload = {
            u'baseEjectionTime': RegionBackendServiceBaseejectiontime(self.request.get('base_ejection_time', {}), self.module).to_request(),
            u'consecutiveErrors': self.request.get('consecutive_errors'),
            u'consecutiveGatewayFailure': self.request.get('consecutive_gateway_failure'),
            u'enforcingConsecutiveErrors': self.request.get('enforcing_consecutive_errors'),
            u'enforcingConsecutiveGatewayFailure': self.request.get('enforcing_consecutive_gateway_failure'),
            u'enforcingSuccessRate': self.request.get('enforcing_success_rate'),
            u'interval': RegionBackendServiceInterval(self.request.get('interval', {}), self.module).to_request(),
            u'maxEjectionPercent': self.request.get('max_ejection_percent'),
            u'successRateMinimumHosts': self.request.get('success_rate_minimum_hosts'),
            u'successRateRequestVolume': self.request.get('success_rate_request_volume'),
            u'successRateStdevFactor': self.request.get('success_rate_stdev_factor'),
        }
        return remove_nones_from_dict(payload)

    def from_response(self):
        payload = {
            u'baseEjectionTime': RegionBackendServiceBaseejectiontime(self.request.get(u'baseEjectionTime', {}), self.module).from_response(),
            u'consecutiveErrors': self.request.get(u'consecutiveErrors'),
            u'consecutiveGatewayFailure': self.request.get(u'consecutiveGatewayFailure'),
            u'enforcingConsecutiveErrors': self.request.get(u'enforcingConsecutiveErrors'),
            u'enforcingConsecutiveGatewayFailure': self.request.get(u'enforcingConsecutiveGatewayFailure'),
            u'enforcingSuccessRate': self.request.get(u'enforcingSuccessRate'),
            u'interval': RegionBackendServiceInterval(self.request.get(u'interval', {}), self.module).from_response(),
            u'maxEjectionPercent': self.request.get(u'maxEjectionPercent'),
            u'successRateMinimumHosts': self.request.get(u'successRateMinimumHosts'),
            u'successRateRequestVolume': self.request.get(u'successRateRequestVolume'),
            u'successRateStdevFactor': self.request.get(u'successRateStdevFactor'),
        }
        return remove_nones_from_dict(payload)


class RegionBackendServiceBaseejectiontime(object):
    """Maps the baseEjectionTime Duration (seconds/nanos) sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({u'seconds': self.request.get('seconds'), u'nanos': self.request.get('nanos')})

    def from_response(self):
        return remove_nones_from_dict({u'seconds': self.request.get(u'seconds'), u'nanos': self.request.get(u'nanos')})


class RegionBackendServiceInterval(object):
    """Maps the interval Duration (seconds/nanos) sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({u'seconds': self.request.get('seconds'), u'nanos': self.request.get('nanos')})

    def from_response(self):
        return remove_nones_from_dict({u'seconds': self.request.get(u'seconds'), u'nanos': self.request.get(u'nanos')})


class RegionBackendServiceLogconfig(object):
    """Maps the logConfig sub-object between Ansible and API form."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({u'enable': self.request.get('enable'), u'sampleRate': self.request.get('sample_rate')})

    def from_response(self):
        return remove_nones_from_dict({u'enable': self.request.get(u'enable'), u'sampleRate': self.request.get(u'sampleRate')})


if __name__ == '__main__':
    main()
b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_backend_service_info.py new file mode 100644 index 000000000..74bea5cb0 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_backend_service_info.py @@ -0,0 +1,841 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_backend_service_info +description: +- Gather info for GCP RegionBackendService +short_description: Gather info for GCP RegionBackendService +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . 
+ type: list + elements: str + region: + description: + - A reference to the region where the regional backend service resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a region backend service + gcp_compute_region_backend_service_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + affinityCookieTtlSec: + description: + - Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If + set to 0, the cookie is non-persistent and lasts only until the end of the + browser session (or equivalent). The maximum allowed value for TTL is one + day. + - When the load balancing scheme is INTERNAL, this field is not used. + returned: success + type: int + backends: + description: + - The set of backends that serve this RegionBackendService. + returned: success + type: complex + contains: + balancingMode: + description: + - Specifies the balancing mode for this backend. + returned: success + type: str + capacityScaler: + description: + - A multiplier applied to the group's maximum servicing capacity (based + on UTILIZATION, RATE or CONNECTION). + - "~>**NOTE**: This field cannot be set for INTERNAL region backend services + (default loadBalancingScheme), but is required for non-INTERNAL backend + service. The total capacity_scaler for all backends must be non-zero." + - A setting of 0 means the group is completely drained, offering 0% of its + available Capacity. Valid range is [0.0,1.0]. + returned: success + type: str + description: + description: + - An optional description of this resource. + - Provide this property when you create the resource. + returned: success + type: str + failover: + description: + - This field designates whether this is a failover backend. More than one + failover backend can be configured for a given RegionBackendService. 
+ returned: success + type: bool + group: + description: + - The fully-qualified URL of an Instance Group or Network Endpoint Group + resource. In case of instance group this defines the list of instances + that serve traffic. Member virtual machine instances from each instance + group must live in the same zone as the instance group itself. No two + backends in a backend service are allowed to use same Instance Group resource. + - For Network Endpoint Groups this defines list of endpoints. All endpoints + of Network Endpoint Group must be hosted on instances located in the same + zone as the Network Endpoint Group. + - Backend services cannot mix Instance Group and Network Endpoint Group + backends. + - When the `load_balancing_scheme` is INTERNAL, only instance groups are + supported. + - Note that you must specify an Instance Group or Network Endpoint Group + resource using the fully-qualified URL, rather than a partial URL. + returned: success + type: str + maxConnections: + description: + - The max number of simultaneous connections for the group. Can be used + with either CONNECTION or UTILIZATION balancing modes. + - Cannot be set for INTERNAL backend services. + - For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance + or maxConnectionsPerEndpoint, as appropriate for group type, must be set. + returned: success + type: int + maxConnectionsPerInstance: + description: + - The max number of simultaneous connections that a single backend instance + can handle. Cannot be set for INTERNAL backend services. + - This is used to calculate the capacity of the group. + - Can be used in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerInstance + must be set. + returned: success + type: int + maxConnectionsPerEndpoint: + description: + - The max number of simultaneous connections that a single backend network + endpoint can handle. Cannot be set for INTERNAL backend services. 
+ - This is used to calculate the capacity of the group. Can be used in either + CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either + maxConnections or maxConnectionsPerEndpoint must be set. + returned: success + type: int + maxRate: + description: + - The max requests per second (RPS) of the group. Cannot be set for INTERNAL + backend services. + - Can be used with either RATE or UTILIZATION balancing modes, but required + if RATE mode. Either maxRate or one of maxRatePerInstance or maxRatePerEndpoint, + as appropriate for group type, must be set. + returned: success + type: int + maxRatePerInstance: + description: + - The max requests per second (RPS) that a single backend instance can handle. + This is used to calculate the capacity of the group. Can be used in either + balancing mode. For RATE mode, either maxRate or maxRatePerInstance must + be set. Cannot be set for INTERNAL backend services. + returned: success + type: str + maxRatePerEndpoint: + description: + - The max requests per second (RPS) that a single backend network endpoint + can handle. This is used to calculate the capacity of the group. Can be + used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint + must be set. Cannot be set for INTERNAL backend services. + returned: success + type: str + maxUtilization: + description: + - Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization + target for the group. Valid range is [0.0, 1.0]. + - Cannot be set for INTERNAL backend services. + returned: success + type: str + circuitBreakers: + description: + - Settings controlling the volume of connections to a backend service. This + field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + returned: success + type: complex + contains: + maxRequestsPerConnection: + description: + - Maximum requests for a single backend connection. 
This parameter is respected + by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there + is no limit. Setting this parameter to 1 will effectively disable keep + alive. + returned: success + type: int + maxConnections: + description: + - The maximum number of connections to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxPendingRequests: + description: + - The maximum number of pending requests to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxRequests: + description: + - The maximum number of parallel requests to the backend cluster. + - Defaults to 1024. + returned: success + type: int + maxRetries: + description: + - The maximum number of parallel retries to the backend cluster. + - Defaults to 3. + returned: success + type: int + consistentHash: + description: + - Consistent Hash-based load balancing can be used to provide soft session affinity + based on HTTP headers, cookies or other properties. This load balancing policy + is applicable only for HTTP connections. The affinity to a particular destination + host will be lost when one or more hosts are added/removed from the destination + service. This field specifies parameters that control consistent hashing. + - This field only applies when all of the following are true - * `load_balancing_scheme` + is set to INTERNAL_MANAGED * `protocol` is set to HTTP, HTTPS, or HTTP2 * + `locality_lb_policy` is set to MAGLEV or RING_HASH . + returned: success + type: complex + contains: + httpCookie: + description: + - Hash is based on HTTP Cookie. This field describes a HTTP cookie that + will be used as the hash key for the consistent hash load balancer. If + the cookie is not present, it will be generated. + - This field is applicable if the sessionAffinity is set to HTTP_COOKIE. + returned: success + type: complex + contains: + ttl: + description: + - Lifetime of the cookie. 
+ returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. + - Must be from 0 to 315,576,000,000 inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + name: + description: + - Name of the cookie. + returned: success + type: str + path: + description: + - Path to set for the cookie. + returned: success + type: str + httpHeaderName: + description: + - The hash based on the value of the specified header field. + - This field is applicable if the sessionAffinity is set to HEADER_FIELD. + returned: success + type: str + minimumRingSize: + description: + - The minimum number of virtual nodes to use for the hash ring. + - Larger ring sizes result in more granular load distributions. If the number + of hosts in the load balancing pool is larger than the ring size, each + host will be assigned a single virtual node. + - Defaults to 1024. + returned: success + type: int + cdnPolicy: + description: + - Cloud CDN configuration for this BackendService. + returned: success + type: complex + contains: + cacheKeyPolicy: + description: + - The CacheKeyPolicy for this CdnPolicy. + returned: success + type: complex + contains: + includeHost: + description: + - If true requests to different hosts will be cached separately. + returned: success + type: bool + includeProtocol: + description: + - If true, http and https requests will be cached separately. + returned: success + type: bool + includeQueryString: + description: + - If true, include query string parameters in the cache key according + to query_string_whitelist and query_string_blacklist. If neither is + set, the entire query string will be included. 
+ - If false, the query string will be excluded from the cache key entirely. + returned: success + type: bool + queryStringBlacklist: + description: + - Names of query string parameters to exclude in cache keys. + - All other parameters will be included. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + returned: success + type: list + queryStringWhitelist: + description: + - Names of query string parameters to include in cache keys. + - All other parameters will be excluded. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + returned: success + type: list + signedUrlCacheMaxAgeSec: + description: + - Maximum number of seconds the response to a signed URL request will be + considered fresh, defaults to 1hr (3600s). After this time period, the + response will be revalidated before being served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: + public, max-age=[TTL]" header, regardless of any existing Cache-Control + header. The actual headers served in responses will not be altered.' + returned: success + type: int + defaultTtl: + description: + - Specifies the default TTL for cached content served by this origin for + responses that do not have an existing valid TTL (max-age or s-max-age). + returned: success + type: int + maxTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + returned: success + type: int + clientTtl: + description: + - Specifies the maximum allowed TTL for cached content served by this origin. + returned: success + type: int + negativeCaching: + description: + - Negative caching allows per-status code TTLs to be set, in order to apply + fine-grained caching for common errors or redirects. 
+ returned: success + type: bool + negativeCachingPolicy: + description: + - Sets a cache TTL for the specified HTTP status code. negativeCaching must + be enabled to configure negativeCachingPolicy. + - Omitting the policy and leaving negativeCaching enabled will use Cloud + CDN's default cache TTLs. + returned: success + type: complex + contains: + code: + description: + - The HTTP status code to define a TTL against. Only HTTP status codes + 300, 301, 308, 404, 405, 410, 421, 451 and 501 can be specified as + values, and you cannot specify a status code more than once. + returned: success + type: int + cacheMode: + description: + - Specifies the cache setting for all responses from this backend. + - 'The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + .' + returned: success + type: str + serveWhileStale: + description: + - Serve existing content from the cache (if available) when revalidating + content with the origin, or when an error is encountered when refreshing + the cache. + returned: success + type: int + connectionDraining: + description: + - Settings for connection draining . + returned: success + type: complex + contains: + drainingTimeoutSec: + description: + - Time for which instance will be drained (not accept new connections, but + still work to finish started). + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + failoverPolicy: + description: + - Policy for failovers. + returned: success + type: complex + contains: + disableConnectionDrainOnFailover: + description: + - 'On failover or failback, this field indicates whether connection drain + will be honored. Setting this to true has the following effect: connections + to the old active pool are not drained. 
Connections to the new active + pool use the timeout of 10 min (currently fixed). Setting to false has + the following effect: both old and new connections will have a drain timeout + of 10 min.' + - This can be set to true only if the protocol is TCP. + - The default is false. + returned: success + type: bool + dropTrafficIfUnhealthy: + description: + - This option is used only when no healthy VMs are detected in the primary + and backup instance groups. When set to true, traffic is dropped. When + set to false, new connections are sent across all VMs in the primary group. + - The default is false. + returned: success + type: bool + failoverRatio: + description: + - The value of the field must be in [0, 1]. If the ratio of the healthy + VMs in the primary backend is at or below this number, traffic arriving + at the load-balanced IP will be directed to the failover backend. + - In case where 'failoverRatio' is not set or all the VMs in the backup + backend are unhealthy, the traffic will be directed back to the primary + backend in the "force" mode, where traffic will be spread to the healthy + VMs with the best effort, or to all VMs when no VM is healthy. + - This field is only used with l4 load balancing. + returned: success + type: str + enableCDN: + description: + - If true, enable Cloud CDN for this RegionBackendService. + returned: success + type: bool + fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. + This field is used in optimistic locking. + returned: success + type: str + healthChecks: + description: + - The set of URLs to HealthCheck resources for health checking this RegionBackendService. + Currently at most one health check can be specified. + - A health check must be specified unless the backend service uses an internet + or serverless NEG as a backend. + returned: success + type: list + id: + description: + - The unique identifier for the resource. 
+ returned: success + type: int + loadBalancingScheme: + description: + - Indicates what kind of load balancing this regional backend service will be + used for. A backend service created for one type of load balancing cannot + be used with the other(s). + returned: success + type: str + localityLbPolicy: + description: + - The load balancing algorithm used within the scope of the locality. + - The possible values are - * ROUND_ROBIN - This is a simple policy in which + each healthy backend is selected in round robin order. + - "* LEAST_REQUEST - An O(1) algorithm which selects two random healthy hosts + and picks the host which has fewer active requests." + - "* RING_HASH - The ring/modulo hash load balancer implements consistent hashing + to backends. The algorithm has the property that the addition/removal of a + host from a set of N hosts only affects 1/N of the requests." + - "* RANDOM - The load balancer selects a random healthy host." + - "* ORIGINAL_DESTINATION - Backend host is selected based on the client connection + metadata, i.e., connections are opened to the same address as the destination + address of the incoming connection before the connection was redirected to + the load balancer." + - "* MAGLEV - used as a drop in replacement for the ring hash load balancer." + - Maglev is not as stable as ring hash but has faster table lookup build times + and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 + This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + outlierDetection: + description: + - Settings controlling eviction of unhealthy hosts from the load balancing pool. + - This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + returned: success + type: complex + contains: + baseEjectionTime: + description: + - The base time that a host is ejected for. The real time is equal to the + base time multiplied by the number of times the host has been ejected. + Defaults to 30000ms or 30s. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + consecutiveErrors: + description: + - Number of errors before a host is ejected from the connection pool. When + the backend host is accessed over HTTP, a 5xx return code qualifies as + an error. + - Defaults to 5. + returned: success + type: int + consecutiveGatewayFailure: + description: + - The number of consecutive gateway failures (502, 503, 504 status or connection + errors that are mapped to one of those status codes) before a consecutive + gateway failure ejection occurs. Defaults to 5. 
+ returned: success + type: int + enforcingConsecutiveErrors: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive 5xx. This setting can be used to + disable ejection or to ramp it up slowly. Defaults to 100. + returned: success + type: int + enforcingConsecutiveGatewayFailure: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive gateway failures. This setting + can be used to disable ejection or to ramp it up slowly. Defaults to 0. + returned: success + type: int + enforcingSuccessRate: + description: + - The percentage chance that a host will be actually ejected when an outlier + status is detected through success rate statistics. This setting can be + used to disable ejection or to ramp it up slowly. Defaults to 100. + returned: success + type: int + interval: + description: + - Time interval between ejection sweep analysis. This can result in both + new ejections as well as hosts being returned to service. Defaults to + 10 seconds. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: int + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + maxEjectionPercent: + description: + - Maximum percentage of hosts in the load balancing pool for the backend + service that can be ejected. Defaults to 10%. + returned: success + type: int + successRateMinimumHosts: + description: + - The number of hosts in a cluster that must have enough request volume + to detect success rate outliers. 
If the number of hosts is less than this + setting, outlier detection via success rate statistics is not performed + for any host in the cluster. Defaults to 5. + returned: success + type: int + successRateRequestVolume: + description: + - The minimum number of total requests that must be collected in one interval + (as defined by the interval duration above) to include this host in success + rate based outlier detection. If the volume is lower than this setting, + outlier detection via success rate statistics is not performed for that + host. Defaults to 100. + returned: success + type: int + successRateStdevFactor: + description: + - 'This factor is used to determine the ejection threshold for success rate + outlier ejection. The ejection threshold is the difference between the + mean success rate, and the product of this factor and the standard deviation + of the mean success rate: mean - (stdev * success_rate_stdev_factor). + This factor is divided by a thousand to get a double. That is, if the + desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.' + returned: success + type: int + portName: + description: + - A named port on a backend instance group representing the port for communication + to the backend VMs in that group. Required when the loadBalancingScheme is + EXTERNAL, INTERNAL_MANAGED, or INTERNAL_SELF_MANAGED and the backends are + instance groups. The named port must be defined on each backend instance group. + This parameter has no meaning if the backends are NEGs. API sets a default + of "http" if not given. + - Must be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP + Load Balancing). + returned: success + type: str + protocol: + description: + - The protocol this RegionBackendService uses to communicate with backends. + - 'The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer + types and may result in errors if used with the GA API.' 
+ returned: success + type: str + sessionAffinity: + description: + - Type of session affinity to use. The default is NONE. Session affinity is + not applicable if the protocol is UDP. + returned: success + type: str + timeoutSec: + description: + - How many seconds to wait for the backend before considering it a failed request. + Default is 30 seconds. Valid range is [1, 86400]. + returned: success + type: int + logConfig: + description: + - This field denotes the logging options for the load balancer traffic served + by this backend service. + - If logging is enabled, logs will be exported to Stackdriver. + returned: success + type: complex + contains: + enable: + description: + - Whether to enable logging for the load balancer traffic served by this + backend service. + returned: success + type: bool + sampleRate: + description: + - This field can only be specified if logging is enabled for this backend + service. The value of the field must be in [0, 1]. This configures the + sampling rate of requests to the load balancer where 1.0 means all logged + requests are reported and 0.0 means no logged requests are reported. + - The default value is 1.0. + returned: success + type: str + network: + description: + - The URL of the network to which this backend service belongs. + - This field can only be specified when the load balancing scheme is set to + INTERNAL. + returned: success + type: dict + region: + description: + - A reference to the region where the regional backend service resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_disk.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_disk.py new file mode 100644 index 000000000..17d128562 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_disk.py @@ -0,0 +1,681 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_disk +description: +- Persistent disks are durable storage devices that function similarly to the physical + disks in a desktop or a server. Compute Engine manages the hardware behind these + devices to ensure data redundancy and optimize performance for you. Persistent disks + are available as either standard hard disk drives (HDD) or solid-state drives (SSD). +- Persistent disks are located independently from your virtual machine instances, + so you can detach or move persistent disks to keep your data even after you delete + your instances. Persistent disk performance scales automatically with size, so you + can resize your existing persistent disks or add more persistent disks to an instance + to meet your performance and storage space requirements. +- Add a persistent disk to your instance when you need reliable and affordable storage + with consistent performance characteristics. +short_description: Creates a GCP RegionDisk +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. 
+ required: false + type: str + labels: + description: + - Labels to apply to this disk. A list of key->value pairs. + required: false + type: dict + licenses: + description: + - Any applicable publicly visible licenses. + elements: str + required: false + type: list + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + size_gb: + description: + - Size of the persistent disk, specified in GB. You can specify this field when + creating a persistent disk using the sourceImage or sourceSnapshot parameter, + or specify it alone to create an empty persistent disk. + - If you specify this field along with sourceImage or sourceSnapshot, the value + of sizeGb must not be less than the size of the sourceImage or the size of the + snapshot. + required: false + type: int + physical_block_size_bytes: + description: + - Physical block size of the persistent disk, in bytes. If not present in a request, + a default value is used. Currently supported sizes are 4096 and 16384, other + sizes may be added in the future. + - If an unsupported value is requested, the error message will list the supported + values for the caller's project. + required: false + type: int + replica_zones: + description: + - URLs of the zones where the disk should be replicated to. + elements: str + required: true + type: list + type: + description: + - URL of the disk type resource describing which disk type to use to create the + disk. Provide this when creating the disk. 
+ required: false + type: str + region: + description: + - A reference to the region where the disk resides. + required: true + type: str + disk_encryption_key: + description: + - Encrypts the disk using a customer-supplied encryption key. + - After you encrypt a disk with a customer-supplied key, you must provide the + same key if you use the disk later (e.g. to create a disk snapshot or an image, + or to attach the disk to a virtual machine). + - Customer-supplied encryption keys do not protect access to metadata of the disk. + - If you do not provide an encryption key when creating the disk, then the disk + will be encrypted using an automatically generated key and you do not need to + provide a key to use the disk later. + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + required: false + type: str + source_snapshot: + description: + - The source snapshot used to create this disk. You can provide this as a partial + or full URL to the resource. + - 'This field represents a link to a Snapshot resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_snapshot task and then set this source_snapshot field to "{{ + name-of-resource }}"' + required: false + type: dict + source_snapshot_encryption_key: + description: + - The customer-supplied encryption key of the source snapshot. Required if the + source snapshot is protected by a customer-supplied encryption key. + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. 
+ required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/regionDisks)' +- 'Adding or Resizing Regional Persistent Disks: U(https://cloud.google.com/compute/docs/disks/regional-persistent-disk)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a region disk + google.cloud.gcp_compute_region_disk: + name: test_object + size_gb: 500 + disk_encryption_key: + raw_key: SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0= + region: us-central1 + replica_zones: + - https://www.googleapis.com/compute/v1/projects/google.com:graphite-playground/zones/us-central1-a + - https://www.googleapis.com/compute/v1/projects/google.com:graphite-playground/zones/us-central1-b + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +lastAttachTimestamp: + description: + - Last attach timestamp in RFC3339 text format. + returned: success + type: str +lastDetachTimestamp: + description: + - Last detach timestamp in RFC3339 text format. + returned: success + type: str +labels: + description: + - Labels to apply to this disk. A list of key->value pairs. + returned: success + type: dict +licenses: + description: + - Any applicable publicly visible licenses. + returned: success + type: list +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +sizeGb: + description: + - Size of the persistent disk, specified in GB. You can specify this field when + creating a persistent disk using the sourceImage or sourceSnapshot parameter, + or specify it alone to create an empty persistent disk. + - If you specify this field along with sourceImage or sourceSnapshot, the value + of sizeGb must not be less than the size of the sourceImage or the size of the + snapshot. + returned: success + type: int +users: + description: + - 'Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance + .' + returned: success + type: list +physicalBlockSizeBytes: + description: + - Physical block size of the persistent disk, in bytes. If not present in a request, + a default value is used. Currently supported sizes are 4096 and 16384, other sizes + may be added in the future. + - If an unsupported value is requested, the error message will list the supported + values for the caller's project. + returned: success + type: int +replicaZones: + description: + - URLs of the zones where the disk should be replicated to. + returned: success + type: list +type: + description: + - URL of the disk type resource describing which disk type to use to create the + disk. Provide this when creating the disk. + returned: success + type: str +region: + description: + - A reference to the region where the disk resides. + returned: success + type: str +diskEncryptionKey: + description: + - Encrypts the disk using a customer-supplied encryption key. + - After you encrypt a disk with a customer-supplied key, you must provide the same + key if you use the disk later (e.g. 
to create a disk snapshot or an image, or + to attach the disk to a virtual machine). + - Customer-supplied encryption keys do not protect access to metadata of the disk. + - If you do not provide an encryption key when creating the disk, then the disk + will be encrypted using an automatically generated key and you do not need to + provide a key to use the disk later. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str +sourceSnapshot: + description: + - The source snapshot used to create this disk. You can provide this as a partial + or full URL to the resource. + returned: success + type: dict +sourceSnapshotEncryptionKey: + description: + - The customer-supplied encryption key of the source snapshot. Required if the source + snapshot is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str +sourceSnapshotId: + description: + - The unique ID of the snapshot used to create this disk. This value identifies + the exact snapshot that was used to create this persistent disk. For example, + if you created the persistent disk from a snapshot that was later deleted and + recreated under the same name, the source snapshot ID would identify the exact + version of the snapshot that was used. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + labels=dict(type='dict'), + licenses=dict(type='list', elements='str'), + name=dict(required=True, type='str'), + size_gb=dict(type='int'), + physical_block_size_bytes=dict(type='int'), + replica_zones=dict(required=True, type='list', elements='str'), + type=dict(type='str'), + region=dict(required=True, type='str'), + disk_encryption_key=dict(type='dict', no_log=True, options=dict(raw_key=dict(type='str'))), + source_snapshot=dict(type='dict'), + source_snapshot_encryption_key=dict(type='dict', no_log=True, options=dict(raw_key=dict(type='str'))), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#disk' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True 
+ else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) + return fetch_resource(module, self_link(module), kind) + + +def update_fields(module, request, response): + if response.get('labels') != request.get('labels'): + label_fingerprint_update(module, request, response) + if response.get('sizeGb') != request.get('sizeGb'): + size_gb_update(module, request, response) + + +def label_fingerprint_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/disks/{name}/setLabels"]).format(**module.params), + {u'labelFingerprint': response.get('labelFingerprint'), u'labels': module.params.get('labels')}, + ) + + +def size_gb_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/disks/{name}/resize"]).format(**module.params), + {u'sizeGb': module.params.get('size_gb')}, + ) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#disk', + u'diskEncryptionKey': RegionDiskDiskencryptionkey(module.params.get('disk_encryption_key', {}), module).to_request(), + u'sourceSnapshotEncryptionKey': RegionDiskSourcesnapshotencryptionkey(module.params.get('source_snapshot_encryption_key', {}), module).to_request(), + u'description': module.params.get('description'), + u'labels': module.params.get('labels'), + u'licenses': module.params.get('licenses'), + u'name': 
module.params.get('name'), + u'sizeGb': module.params.get('size_gb'), + u'physicalBlockSizeBytes': module.params.get('physical_block_size_bytes'), + u'replicaZones': module.params.get('replica_zones'), + u'type': region_disk_type_selflink(module.params.get('type'), module.params), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. 
+# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'labelFingerprint': response.get(u'labelFingerprint'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'id': response.get(u'id'), + u'lastAttachTimestamp': response.get(u'lastAttachTimestamp'), + u'lastDetachTimestamp': response.get(u'lastDetachTimestamp'), + u'labels': response.get(u'labels'), + u'licenses': response.get(u'licenses'), + u'name': module.params.get('name'), + u'sizeGb': response.get(u'sizeGb'), + u'users': response.get(u'users'), + u'physicalBlockSizeBytes': response.get(u'physicalBlockSizeBytes'), + u'replicaZones': response.get(u'replicaZones'), + u'type': response.get(u'type'), + } + + +def zone_selflink(name, params): + if name is None: + return + url = r"https://compute.googleapis.com/compute/v1/projects/.*/zones/.*" + if not re.match(url, name): + name = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/%s".format(**params) % name + return name + + +def region_disk_type_selflink(name, params): + if name is None: + return + url = r"https://compute.googleapis.com/compute/v1/projects/.*/regions/.*/diskTypes/.*" + if not re.match(url, name): + name = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/diskTypes/%s".format(**params) % name + return name + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return 
fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#disk') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class RegionDiskDiskencryptionkey(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')}) + + def from_response(self): + return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')}) + + +class RegionDiskSourcesnapshotencryptionkey(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')}) + + def from_response(self): + return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_disk_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_disk_info.py new file mode 100644 index 000000000..648b4b874 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_disk_info.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_region_disk_info
+description:
+- Gather info for GCP RegionDisk
+short_description: Gather info for GCP RegionDisk
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2) .
+    type: list
+    elements: str
+  region:
+    description:
+    - A reference to the region where the disk resides.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a region disk + gcp_compute_region_disk_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + lastAttachTimestamp: + description: + - Last attach timestamp in RFC3339 text format. + returned: success + type: str + lastDetachTimestamp: + description: + - Last detach timestamp in RFC3339 text format. + returned: success + type: str + labels: + description: + - Labels to apply to this disk. A list of key->value pairs. + returned: success + type: dict + licenses: + description: + - Any applicable publicly visible licenses. + returned: success + type: list + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + sizeGb: + description: + - Size of the persistent disk, specified in GB. 
You can specify this field when + creating a persistent disk using the sourceImage or sourceSnapshot parameter, + or specify it alone to create an empty persistent disk. + - If you specify this field along with sourceImage or sourceSnapshot, the value + of sizeGb must not be less than the size of the sourceImage or the size of + the snapshot. + returned: success + type: int + users: + description: + - 'Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance + .' + returned: success + type: list + physicalBlockSizeBytes: + description: + - Physical block size of the persistent disk, in bytes. If not present in a + request, a default value is used. Currently supported sizes are 4096 and 16384, + other sizes may be added in the future. + - If an unsupported value is requested, the error message will list the supported + values for the caller's project. + returned: success + type: int + replicaZones: + description: + - URLs of the zones where the disk should be replicated to. + returned: success + type: list + type: + description: + - URL of the disk type resource describing which disk type to use to create + the disk. Provide this when creating the disk. + returned: success + type: str + region: + description: + - A reference to the region where the disk resides. + returned: success + type: str + diskEncryptionKey: + description: + - Encrypts the disk using a customer-supplied encryption key. + - After you encrypt a disk with a customer-supplied key, you must provide the + same key if you use the disk later (e.g. to create a disk snapshot or an image, + or to attach the disk to a virtual machine). + - Customer-supplied encryption keys do not protect access to metadata of the + disk. + - If you do not provide an encryption key when creating the disk, then the disk + will be encrypted using an automatically generated key and you do not need + to provide a key to use the disk later. 
+ returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + sourceSnapshot: + description: + - The source snapshot used to create this disk. You can provide this as a partial + or full URL to the resource. + returned: success + type: dict + sourceSnapshotEncryptionKey: + description: + - The customer-supplied encryption key of the source snapshot. Required if the + source snapshot is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + sourceSnapshotId: + description: + - The unique ID of the snapshot used to create this disk. This value identifies + the exact snapshot that was used to create this persistent disk. For example, + if you created the persistent disk from a snapshot that was later deleted + and recreated under the same name, the source snapshot ID would identify the + exact version of the snapshot that was used. 
+  returned: success
+  type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
+import json
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str')))
+
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
+
+    return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
+    module.exit_json(**return_value)
+
+
+def collection(module):
+    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks".format(**module.params)
+
+
+def fetch_list(module, link, query):
+    auth = GcpSession(module, 'compute')
+    return auth.list(link, return_if_object, array_name='items', params={'filter': query})
+
+
+def query_options(filters):
+    if not filters:
+        return ''
+
+    if len(filters) == 1:
+        return filters[0]
+    else:
+        queries = []
+        for f in filters:
+            # For multiple queries, all queries should have ()
+            if f[0] != '(' and f[-1] != ')':
+                queries.append("(%s)" % f)
+            else:
+                queries.append(f)
+
+        return ' '.join(queries)
+
+
+def return_if_object(module, response):
+    # If not found, return nothing.
+    if response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_health_check.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_health_check.py new file mode 100644 index 000000000..745f9a579 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_health_check.py @@ -0,0 +1,1424 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_health_check +description: +- Health Checks determine whether instances are responsive and able to do work. +- They are an important part of a comprehensive load balancing configuration, as they + enable monitoring instances behind load balancers. +- Health Checks poll instances at a specified interval. Instances that do not respond + successfully to some number of probes in a row are marked as unhealthy. No new connections + are sent to unhealthy instances, though existing connections will continue. The + health check will continue to poll unhealthy instances. If an instance later responds + successfully to some number of consecutive probes, it is marked healthy again and + can receive new connections. +short_description: Creates a GCP RegionHealthCheck +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + check_interval_sec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + required: false + default: '5' + type: int + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. 
+ required: false + type: str + healthy_threshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + required: false + default: '2' + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + unhealthy_threshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. + required: false + default: '2' + type: int + timeout_sec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + required: false + default: '5' + type: int + type: + description: + - Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not + specified, the default is TCP. Exactly one of the protocol-specific health check + field must be specified, which must match type field. + - 'Some valid choices include: "TCP", "SSL", "HTTP", "HTTPS", "HTTP2"' + required: false + type: str + http_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + host: + description: + - The value of the host header in the HTTP health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + required: false + type: str + request_path: + description: + - The request path of the HTTP health check request. + - The default value is /. 
+ required: false + default: "/" + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, HTTP health check follows behavior specified in `port` + and `portName` fields. + - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + https_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + host: + description: + - The value of the host header in the HTTPS health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + required: false + type: str + request_path: + description: + - The request path of the HTTPS health check request. 
+ - The default value is /. + required: false + default: "/" + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, HTTPS health check follows behavior specified in `port` + and `portName` fields. + - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + tcp_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + request: + description: + - The application data to send once the TCP connection has been established + (default value is empty). If both request and response are empty, the connection + establishment alone will indicate health. The request data can only be ASCII. 
+ required: false + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the TCP health check request. + - The default value is 80. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, TCP health check follows behavior specified in `port` + and `portName` fields. + - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + ssl_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + request: + description: + - The application data to send once the SSL connection has been established + (default value is empty). If both request and response are empty, the connection + establishment alone will indicate health. The request data can only be ASCII. 
+ required: false + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the SSL health check request. + - The default value is 443. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, SSL health check follows behavior specified in `port` + and `portName` fields. + - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + http2_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + host: + description: + - The value of the host header in the HTTP2 health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + required: false + type: str + request_path: + description: + - The request path of the HTTP2 health check request. 
+ - The default value is /. + required: false + default: "/" + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + required: false + type: str + port: + description: + - The TCP port number for the HTTP2 health check request. + - The default value is 443. + required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + default: NONE + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, HTTP2 health check follows behavior specified in `port` + and `portName` fields. + - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + grpc_health_check: + description: + - A nested object resource. + required: false + type: dict + suboptions: + port: + description: + - The port number for the health check request. + - Must be specified if portName and portSpecification are not set or if port_specification + is USE_FIXED_PORT. Valid values are 1 through 65535. 
+ required: false + type: int + port_name: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + required: false + type: str + port_specification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health + checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health checking." + - If not specified, gRPC health check follows behavior specified in `port` + and `portName` fields. + - 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"' + required: false + type: str + grpc_service_name: + description: + - The gRPC service name for the health check. + - 'The value of grpcServiceName has the following meanings by convention: + * Empty serviceName means the overall status of all services at the backend.' + - "* Non-empty serviceName means the health of that gRPC service, as defined + by the owner of the service." + - The grpcServiceName can only be ASCII. + required: false + type: str + log_config: + description: + - Configure logging on this health check. + required: false + type: dict + suboptions: + enable: + description: + - Indicates whether or not to export logs. This is false by default, which + means no health check logging will be done. + required: false + default: 'false' + type: bool + region: + description: + - The region where the regional health check resides. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/regionHealthChecks)' +- 'Official Documentation: U(https://cloud.google.com/load-balancing/docs/health-checks)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a region health check + google.cloud.gcp_compute_region_health_check: + name: test_object + type: TCP + tcp_health_check: + port_name: service-health + request: ping + response: pong + healthy_threshold: 10 + timeout_sec: 2 + unhealthy_threshold: 5 + region: us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +checkIntervalSec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + returned: success + type: int +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. + returned: success + type: int +timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. 
+ - The default value is 5 seconds. It is invalid for timeoutSec to have greater value + than checkIntervalSec. + returned: success + type: int +type: + description: + - Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not + specified, the default is TCP. Exactly one of the protocol-specific health check + field must be specified, which must match type field. + returned: success + type: str +httpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." 
+ - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, HTTP health check follows behavior specified in `port` and + `portName` fields. + returned: success + type: str +httpsHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTPS health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTPS health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. 
For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, HTTPS health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str +tcpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the TCP connection has been established + (default value is empty). If both request and response are empty, the connection + establishment alone will indicate health. The request data can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the TCP health check request. + - The default value is 80. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, TCP health check follows behavior specified in `port` and + `portName` fields. 
+ returned: success + type: str +sslHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the SSL connection has been established + (default value is empty). If both request and response are empty, the connection + establishment alone will indicate health. The request data can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the SSL health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, SSL health check follows behavior specified in `port` and + `portName` fields. + returned: success + type: str +http2HealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP2 health check request. 
+ - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP2 health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP2 health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, HTTP2 health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str +grpcHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + port: + description: + - The port number for the health check request. + - Must be specified if portName and portSpecification are not set or if port_specification + is USE_FIXED_PORT. Valid values are 1 through 65535. 
+ returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name + are defined, port takes precedence. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the following + values: * `USE_FIXED_PORT`: The port number in `port` is used for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the port + or named port specified in the Backend Service is used for health checking." + - If not specified, gRPC health check follows behavior specified in `port` and + `portName` fields. + returned: success + type: str + grpcServiceName: + description: + - The gRPC service name for the health check. + - 'The value of grpcServiceName has the following meanings by convention: * + Empty serviceName means the overall status of all services at the backend.' + - "* Non-empty serviceName means the health of that gRPC service, as defined + by the owner of the service." + - The grpcServiceName can only be ASCII. + returned: success + type: str +logConfig: + description: + - Configure logging on this health check. + returned: success + type: complex + contains: + enable: + description: + - Indicates whether or not to export logs. This is false by default, which means + no health check logging will be done. + returned: success + type: bool +region: + description: + - The region where the regional health check resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + check_interval_sec=dict(default=5, type='int'), + description=dict(type='str'), + healthy_threshold=dict(default=2, type='int'), + name=dict(required=True, type='str'), + unhealthy_threshold=dict(default=2, type='int'), + timeout_sec=dict(default=5, type='int'), + type=dict(type='str'), + http_health_check=dict( + type='dict', + options=dict( + host=dict(type='str'), + request_path=dict(default='/', type='str'), + response=dict(type='str'), + port=dict(type='int'), + port_name=dict(type='str'), + proxy_header=dict(default='NONE', type='str'), + port_specification=dict(type='str'), + ), + ), + https_health_check=dict( + type='dict', + options=dict( + host=dict(type='str'), + request_path=dict(default='/', type='str'), + response=dict(type='str'), + port=dict(type='int'), + port_name=dict(type='str'), + proxy_header=dict(default='NONE', type='str'), + port_specification=dict(type='str'), + ), + ), + tcp_health_check=dict( + type='dict', + options=dict( + request=dict(type='str'), + response=dict(type='str'), + port=dict(type='int'), + port_name=dict(type='str'), + proxy_header=dict(default='NONE', type='str'), + port_specification=dict(type='str'), + ), + ), + ssl_health_check=dict( + type='dict', + 
options=dict( + request=dict(type='str'), + response=dict(type='str'), + port=dict(type='int'), + port_name=dict(type='str'), + proxy_header=dict(default='NONE', type='str'), + port_specification=dict(type='str'), + ), + ), + http2_health_check=dict( + type='dict', + options=dict( + host=dict(type='str'), + request_path=dict(default='/', type='str'), + response=dict(type='str'), + port=dict(type='int'), + port_name=dict(type='str'), + proxy_header=dict(default='NONE', type='str'), + port_specification=dict(type='str'), + ), + ), + grpc_health_check=dict( + type='dict', + options=dict(port=dict(type='int'), port_name=dict(type='str'), port_specification=dict(type='str'), grpc_service_name=dict(type='str')), + ), + log_config=dict(type='dict', options=dict(enable=dict(type='bool'))), + region=dict(type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#healthCheck' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def 
resource_to_request(module): + request = { + u'kind': 'compute#healthCheck', + u'region': region_selflink(module.params.get('region'), module.params), + u'checkIntervalSec': module.params.get('check_interval_sec'), + u'description': module.params.get('description'), + u'healthyThreshold': module.params.get('healthy_threshold'), + u'name': module.params.get('name'), + u'unhealthyThreshold': module.params.get('unhealthy_threshold'), + u'timeoutSec': module.params.get('timeout_sec'), + u'type': module.params.get('type'), + u'httpHealthCheck': RegionHealthCheckHttphealthcheck(module.params.get('http_health_check', {}), module).to_request(), + u'httpsHealthCheck': RegionHealthCheckHttpshealthcheck(module.params.get('https_health_check', {}), module).to_request(), + u'tcpHealthCheck': RegionHealthCheckTcphealthcheck(module.params.get('tcp_health_check', {}), module).to_request(), + u'sslHealthCheck': RegionHealthCheckSslhealthcheck(module.params.get('ssl_health_check', {}), module).to_request(), + u'http2HealthCheck': RegionHealthCheckHttp2healthcheck(module.params.get('http2_health_check', {}), module).to_request(), + u'grpcHealthCheck': RegionHealthCheckGrpchealthcheck(module.params.get('grpc_health_check', {}), module).to_request(), + u'logConfig': RegionHealthCheckLogconfig(module.params.get('log_config', {}), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/healthChecks/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/healthChecks".format(**module.params) + + +def 
return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'checkIntervalSec': response.get(u'checkIntervalSec'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'healthyThreshold': response.get(u'healthyThreshold'), + u'id': response.get(u'id'), + u'name': module.params.get('name'), + u'unhealthyThreshold': response.get(u'unhealthyThreshold'), + u'timeoutSec': response.get(u'timeoutSec'), + u'type': response.get(u'type'), + u'httpHealthCheck': RegionHealthCheckHttphealthcheck(response.get(u'httpHealthCheck', {}), module).from_response(), + u'httpsHealthCheck': RegionHealthCheckHttpshealthcheck(response.get(u'httpsHealthCheck', {}), module).from_response(), + u'tcpHealthCheck': RegionHealthCheckTcphealthcheck(response.get(u'tcpHealthCheck', {}), module).from_response(), + u'sslHealthCheck': RegionHealthCheckSslhealthcheck(response.get(u'sslHealthCheck', {}), module).from_response(), + u'http2HealthCheck': RegionHealthCheckHttp2healthcheck(response.get(u'http2HealthCheck', {}), module).from_response(), + u'grpcHealthCheck': RegionHealthCheckGrpchealthcheck(response.get(u'grpcHealthCheck', {}), module).from_response(), + u'logConfig': RegionHealthCheckLogconfig(response.get(u'logConfig', {}), module).from_response(), + } + + +def region_selflink(name, params): + if name is None: + return + url = r"https://compute.googleapis.com/compute/v1/projects/.*/regions/.*" + if not re.match(url, name): + name = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name + return name + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 
'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#healthCheck') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class RegionHealthCheckHttphealthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'host': self.request.get('host'), + u'requestPath': self.request.get('request_path'), + u'response': self.request.get('response'), + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'proxyHeader': self.request.get('proxy_header'), + u'portSpecification': self.request.get('port_specification'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'host': self.request.get(u'host'), + u'requestPath': self.request.get(u'requestPath'), + u'response': self.request.get(u'response'), + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'proxyHeader': self.request.get(u'proxyHeader'), + u'portSpecification': self.request.get(u'portSpecification'), + } + ) + + +class RegionHealthCheckHttpshealthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def 
to_request(self): + return remove_nones_from_dict( + { + u'host': self.request.get('host'), + u'requestPath': self.request.get('request_path'), + u'response': self.request.get('response'), + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'proxyHeader': self.request.get('proxy_header'), + u'portSpecification': self.request.get('port_specification'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'host': self.request.get(u'host'), + u'requestPath': self.request.get(u'requestPath'), + u'response': self.request.get(u'response'), + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'proxyHeader': self.request.get(u'proxyHeader'), + u'portSpecification': self.request.get(u'portSpecification'), + } + ) + + +class RegionHealthCheckTcphealthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'request': self.request.get('request'), + u'response': self.request.get('response'), + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'proxyHeader': self.request.get('proxy_header'), + u'portSpecification': self.request.get('port_specification'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'request': self.request.get(u'request'), + u'response': self.request.get(u'response'), + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'proxyHeader': self.request.get(u'proxyHeader'), + u'portSpecification': self.request.get(u'portSpecification'), + } + ) + + +class RegionHealthCheckSslhealthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'request': self.request.get('request'), + 
u'response': self.request.get('response'), + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'proxyHeader': self.request.get('proxy_header'), + u'portSpecification': self.request.get('port_specification'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'request': self.request.get(u'request'), + u'response': self.request.get(u'response'), + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'proxyHeader': self.request.get(u'proxyHeader'), + u'portSpecification': self.request.get(u'portSpecification'), + } + ) + + +class RegionHealthCheckHttp2healthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'host': self.request.get('host'), + u'requestPath': self.request.get('request_path'), + u'response': self.request.get('response'), + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'proxyHeader': self.request.get('proxy_header'), + u'portSpecification': self.request.get('port_specification'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'host': self.request.get(u'host'), + u'requestPath': self.request.get(u'requestPath'), + u'response': self.request.get(u'response'), + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'proxyHeader': self.request.get(u'proxyHeader'), + u'portSpecification': self.request.get(u'portSpecification'), + } + ) + + +class RegionHealthCheckGrpchealthcheck(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'port': self.request.get('port'), + u'portName': self.request.get('port_name'), + u'portSpecification': 
self.request.get('port_specification'), + u'grpcServiceName': self.request.get('grpc_service_name'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'port': self.request.get(u'port'), + u'portName': self.request.get(u'portName'), + u'portSpecification': self.request.get(u'portSpecification'), + u'grpcServiceName': self.request.get(u'grpcServiceName'), + } + ) + + +class RegionHealthCheckLogconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'enable': self.request.get('enable')}) + + def from_response(self): + return remove_nones_from_dict({u'enable': self.request.get(u'enable')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_health_check_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_health_check_info.py new file mode 100644 index 000000000..d9d427694 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_health_check_info.py @@ -0,0 +1,592 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_region_health_check_info
+description:
+- Gather info for GCP RegionHealthCheck
+short_description: Gather info for GCP RegionHealthCheck
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+    elements: str
+  region:
+    description:
+    - The region where the regional health check resides.
+    required: false
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a region health check + gcp_compute_region_health_check_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + checkIntervalSec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. 
+ returned: success + type: str + healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + returned: success + type: int + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. + returned: success + type: int + timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + returned: success + type: int + type: + description: + - Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If + not specified, the default is TCP. Exactly one of the protocol-specific health + check field must be specified, which must match type field. + returned: success + type: str + httpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP health check request. 
+ - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, HTTP health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + httpsHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTPS health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTPS health check request. + - The default value is /. 
+ returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, HTTPS health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + tcpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the TCP connection has been established + (default value is empty). If both request and response are empty, the + connection establishment alone will indicate health. The request data + can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. 
The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the TCP health check request. + - The default value is 80. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, TCP health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + sslHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the SSL connection has been established + (default value is empty). If both request and response are empty, the + connection establishment alone will indicate health. The request data + can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the SSL health check request. + - The default value is 443. 
+ returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, SSL health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + http2HealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP2 health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP2 health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP2 health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. 
If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, HTTP2 health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + grpcHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + port: + description: + - The port number for the health check request. + - Must be specified if portName and portSpecification are not set or if + port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." 
+ - If not specified, gRPC health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + grpcServiceName: + description: + - The gRPC service name for the health check. + - 'The value of grpcServiceName has the following meanings by convention: + * Empty serviceName means the overall status of all services at the backend.' + - "* Non-empty serviceName means the health of that gRPC service, as defined + by the owner of the service." + - The grpcServiceName can only be ASCII. + returned: success + type: str + logConfig: + description: + - Configure logging on this health check. + returned: success + type: complex + contains: + enable: + description: + - Indicates whether or not to export logs. This is false by default, which + means no health check logging will be done. + returned: success + type: bool + region: + description: + - The region where the regional health check resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return 
"https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/healthChecks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_instance_group_manager.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_instance_group_manager.py new file mode 100644 index 000000000..c2f77b8d5 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_instance_group_manager.py @@ -0,0 +1,679 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This 
file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_instance_group_manager +description: +- Creates a managed instance group using the information that you specify in the request. + After the group is created, it schedules an action to create instances in the group + using the specified instance template. This operation is marked as DONE when the + group is created even if the instances in the group have not yet been created. You + must separately verify the status of the individual instances. +- A managed instance group can have up to 1000 VM instances per group. +short_description: Creates a GCP RegionInstanceGroupManager +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + base_instance_name: + description: + - The base instance name to use for instances in this group. The value must be + 1-58 characters long. Instances are named by appending a hyphen and a random + four-character string to the base instance name. + - The base instance name must comply with RFC1035. 
+ required: true + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + instance_template: + description: + - The instance template that is specified for this managed instance group. The + group uses this template to create all new instances in the managed instance + group. + - 'This field represents a link to a InstanceTemplate resource in GCP. It can + be specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_instance_template task and then set this + instance_template field to "{{ name-of-resource }}"' + required: true + type: dict + name: + description: + - The name of the managed instance group. The name must be 1-63 characters long, + and comply with RFC1035. + required: true + type: str + named_ports: + description: + - Named ports configured for the Instance Groups complementary to this Instance + Group Manager. + elements: dict + required: false + type: list + suboptions: + name: + description: + - The name for this named port. The name must be 1-63 characters long, and + comply with RFC1035. + required: false + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + required: false + type: int + target_pools: + description: + - TargetPool resources to which instances in the instanceGroup field are added. + The target pools automatically apply to all of the instances in the managed + instance group. + elements: dict + required: false + type: list + target_size: + description: + - The target number of running instances for this managed instance group. Deleting + or abandoning instances reduces this number. Resizing the group changes this + number. 
+ required: false + type: int + auto_healing_policies: + description: + - The autohealing policy for this managed instance group . + elements: dict + required: false + type: list + suboptions: + health_check: + description: + - The URL for the health check that signals autohealing. + required: false + type: str + initial_delay_sec: + description: + - The number of seconds that the managed instance group waits before it applies + autohealing policies to new instances or recently recreated instances . + required: false + type: int + region: + description: + - The region the managed instance group resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-instancetemplate + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a address + google.cloud.gcp_compute_address: + name: address-instancetemplate + region: us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: address + +- name: create a instance template + google.cloud.gcp_compute_instance_template: + name: "{{ resource_name }}" + properties: + disks: + - auto_delete: 'true' + boot: 'true' + initialize_params: + source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts + machine_type: n1-standard-1 + network_interfaces: + - network: "{{ network }}" + access_configs: + - name: test-config + type: ONE_TO_ONE_NAT + nat_ip: "{{ address }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancetemplate + +- name: create a region instance group manager + google.cloud.gcp_compute_region_instance_group_manager: + name: test_object + base_instance_name: test1-child + region: us-central1 + instance_template: "{{ instancetemplate }}" + target_size: 3 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +baseInstanceName: + description: + - The base instance name to use for instances in this group. The value must be 1-58 + characters long. Instances are named by appending a hyphen and a random four-character + string to the base instance name. + - The base instance name must comply with RFC1035. + returned: success + type: str +creationTimestamp: + description: + - The creation timestamp for this managed instance group in RFC3339 text format. 
+ returned: success + type: str +currentActions: + description: + - The list of instance actions and the number of instances in this managed instance + group that are scheduled for each of those actions. + returned: success + type: complex + contains: + abandoning: + description: + - The total number of instances in the managed instance group that are scheduled + to be abandoned. Abandoning an instance removes it from the managed instance + group without deleting it. + returned: success + type: int + creating: + description: + - The number of instances in the managed instance group that are scheduled to + be created or are currently being created. If the group fails to create any + of these instances, it tries again until it creates the instance successfully. + - If you have disabled creation retries, this field will not be populated; instead, + the creatingWithoutRetries field will be populated. + returned: success + type: int + creatingWithoutRetries: + description: + - The number of instances that the managed instance group will attempt to create. + The group attempts to create each instance only once. If the group fails to + create any of these instances, it decreases the group's targetSize value accordingly. + returned: success + type: int + deleting: + description: + - The number of instances in the managed instance group that are scheduled to + be deleted or are currently being deleted. + returned: success + type: int + none: + description: + - The number of instances in the managed instance group that are running and + have no scheduled actions. + returned: success + type: int + recreating: + description: + - The number of instances in the managed instance group that are scheduled to + be recreated or are currently being recreated. + - Recreating an instance deletes the existing root persistent disk and creates + a new disk from the image that is defined in the instance template. 
+ returned: success + type: int + refreshing: + description: + - The number of instances in the managed instance group that are being reconfigured + with properties that do not require a restart or a recreate action. For example, + setting or removing target pools for the instance. + returned: success + type: int + restarting: + description: + - The number of instances in the managed instance group that are scheduled to + be restarted or are currently being restarted. + returned: success + type: int +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +id: + description: + - A unique identifier for this resource. + returned: success + type: int +instanceGroup: + description: + - The instance group being managed. + returned: success + type: dict +instanceTemplate: + description: + - The instance template that is specified for this managed instance group. The group + uses this template to create all new instances in the managed instance group. + returned: success + type: dict +name: + description: + - The name of the managed instance group. The name must be 1-63 characters long, + and comply with RFC1035. + returned: success + type: str +namedPorts: + description: + - Named ports configured for the Instance Groups complementary to this Instance + Group Manager. + returned: success + type: complex + contains: + name: + description: + - The name for this named port. The name must be 1-63 characters long, and comply + with RFC1035. + returned: success + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + returned: success + type: int +targetPools: + description: + - TargetPool resources to which instances in the instanceGroup field are added. + The target pools automatically apply to all of the instances in the managed instance + group. 
+ returned: success + type: list +targetSize: + description: + - The target number of running instances for this managed instance group. Deleting + or abandoning instances reduces this number. Resizing the group changes this number. + returned: success + type: int +autoHealingPolicies: + description: + - The autohealing policy for this managed instance group . + returned: success + type: complex + contains: + healthCheck: + description: + - The URL for the health check that signals autohealing. + returned: success + type: str + initialDelaySec: + description: + - The number of seconds that the managed instance group waits before it applies + autohealing policies to new instances or recently recreated instances . + returned: success + type: int +region: + description: + - The region the managed instance group resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + base_instance_name=dict(required=True, type='str'), + description=dict(type='str'), + instance_template=dict(required=True, type='dict'), + name=dict(required=True, type='str'), + named_ports=dict(type='list', elements='dict', options=dict(name=dict(type='str'), port=dict(type='int'))), + target_pools=dict(type='list', elements='dict'), + target_size=dict(type='int'), + 
auto_healing_policies=dict(type='list', elements='dict', options=dict(health_check=dict(type='str'), initial_delay_sec=dict(type='int'))), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#instanceGroupManager' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#instanceGroupManager', + u'baseInstanceName': module.params.get('base_instance_name'), + u'description': module.params.get('description'), + u'instanceTemplate': replace_resource_dict(module.params.get(u'instance_template', {}), 'selfLink'), + u'name': module.params.get('name'), + u'namedPorts': RegionInstanceGroupManagerNamedportsArray(module.params.get('named_ports', []), module).to_request(), + u'targetPools': replace_resource_dict(module.params.get('target_pools', []), 'selfLink'), + u'targetSize': module.params.get('target_size'), + u'autoHealingPolicies': 
RegionInstanceGroupManagerAutohealingpoliciesArray(module.params.get('auto_healing_policies', []), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'baseInstanceName': response.get(u'baseInstanceName'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'currentActions': RegionInstanceGroupManagerCurrentactions(response.get(u'currentActions', {}), module).from_response(), + u'description': module.params.get('description'), + u'id': response.get(u'id'), + u'instanceGroup': response.get(u'instanceGroup'), + u'instanceTemplate': response.get(u'instanceTemplate'), + u'name': response.get(u'name'), + u'namedPorts': RegionInstanceGroupManagerNamedportsArray(response.get(u'namedPorts', []), module).from_response(), + u'targetPools': response.get(u'targetPools'), + u'targetSize': response.get(u'targetSize'), + u'autoHealingPolicies': RegionInstanceGroupManagerAutohealingpoliciesArray(response.get(u'autoHealingPolicies', []), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instanceGroupManager') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = 
navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class RegionInstanceGroupManagerCurrentactions(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({}) + + def from_response(self): + return remove_nones_from_dict({}) + + +class RegionInstanceGroupManagerNamedportsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'name': item.get('name'), u'port': item.get('port')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'name': item.get(u'name'), u'port': item.get(u'port')}) + + +class RegionInstanceGroupManagerAutohealingpoliciesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'healthCheck': item.get('health_check'), u'initialDelaySec': item.get('initial_delay_sec')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'healthCheck': item.get(u'healthCheck'), u'initialDelaySec': item.get(u'initialDelaySec')}) + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_instance_group_manager_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_instance_group_manager_info.py new file mode 100644 index 000000000..b32014973 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_instance_group_manager_info.py @@ -0,0 +1,355 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_instance_group_manager_info +description: +- Gather info for GCP RegionInstanceGroupManager +short_description: Gather info for GCP RegionInstanceGroupManager +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). 
+ - Each additional filter in the list will be added as an AND condition (filter1 + and filter2). + type: list + elements: str + region: + description: + - The region the managed instance group resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a region instance group manager + gcp_compute_region_instance_group_manager_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + baseInstanceName: + description: + - The base instance name to use for instances in this group. The value must + be 1-58 characters long. Instances are named by appending a hyphen and a random + four-character string to the base instance name. + - The base instance name must comply with RFC1035. + returned: success + type: str + creationTimestamp: + description: + - The creation timestamp for this managed instance group in RFC3339 text format. + returned: success + type: str + currentActions: + description: + - The list of instance actions and the number of instances in this managed instance + group that are scheduled for each of those actions. + returned: success + type: complex + contains: + abandoning: + description: + - The total number of instances in the managed instance group that are scheduled + to be abandoned. Abandoning an instance removes it from the managed instance + group without deleting it. + returned: success + type: int + creating: + description: + - The number of instances in the managed instance group that are scheduled + to be created or are currently being created. If the group fails to create + any of these instances, it tries again until it creates the instance successfully. + - If you have disabled creation retries, this field will not be populated; + instead, the creatingWithoutRetries field will be populated. + returned: success + type: int + creatingWithoutRetries: + description: + - The number of instances that the managed instance group will attempt to + create. The group attempts to create each instance only once. 
If the group + fails to create any of these instances, it decreases the group's targetSize + value accordingly. + returned: success + type: int + deleting: + description: + - The number of instances in the managed instance group that are scheduled + to be deleted or are currently being deleted. + returned: success + type: int + none: + description: + - The number of instances in the managed instance group that are running + and have no scheduled actions. + returned: success + type: int + recreating: + description: + - The number of instances in the managed instance group that are scheduled + to be recreated or are currently being being recreated. + - Recreating an instance deletes the existing root persistent disk and creates + a new disk from the image that is defined in the instance template. + returned: success + type: int + refreshing: + description: + - The number of instances in the managed instance group that are being reconfigured + with properties that do not require a restart or a recreate action. For + example, setting or removing target pools for the instance. + returned: success + type: int + restarting: + description: + - The number of instances in the managed instance group that are scheduled + to be restarted or are currently being restarted. + returned: success + type: int + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - A unique identifier for this resource. + returned: success + type: int + instanceGroup: + description: + - The instance group being managed. + returned: success + type: dict + instanceTemplate: + description: + - The instance template that is specified for this managed instance group. The + group uses this template to create all new instances in the managed instance + group. + returned: success + type: dict + name: + description: + - The name of the managed instance group. 
The name must be 1-63 characters long, + and comply with RFC1035. + returned: success + type: str + namedPorts: + description: + - Named ports configured for the Instance Groups complementary to this Instance + Group Manager. + returned: success + type: complex + contains: + name: + description: + - The name for this named port. The name must be 1-63 characters long, and + comply with RFC1035. + returned: success + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + returned: success + type: int + targetPools: + description: + - TargetPool resources to which instances in the instanceGroup field are added. + The target pools automatically apply to all of the instances in the managed + instance group. + returned: success + type: list + targetSize: + description: + - The target number of running instances for this managed instance group. Deleting + or abandoning instances reduces this number. Resizing the group changes this + number. + returned: success + type: int + autoHealingPolicies: + description: + - The autohealing policy for this managed instance group . + returned: success + type: complex + contains: + healthCheck: + description: + - The URL for the health check that signals autohealing. + returned: success + type: str + initialDelaySec: + description: + - The number of seconds that the managed instance group waits before it + applies autohealing policies to new instances or recently recreated instances + . + returned: success + type: int + region: + description: + - The region the managed instance group resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_http_proxy.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_http_proxy.py new file mode 100644 index 000000000..ec2709564 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_http_proxy.py @@ -0,0 +1,436 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_target_http_proxy +description: +- Represents a RegionTargetHttpProxy resource, which is used by one or more forwarding + rules to route incoming HTTP requests to a URL map. +short_description: Creates a GCP RegionTargetHttpProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + url_map: + description: + - A reference to the RegionUrlMap resource that defines the mapping from URL to + the BackendService. + - 'This field represents a link to a RegionUrlMap resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_region_url_map task and then set this url_map field to "{{ + name-of-resource }}"' + required: true + type: dict + region: + description: + - The region where the regional proxy resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/regionTargetHttpProxies)' +- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. 
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: instancegroup-targethttpproxy + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancegroup + +- name: create a HTTP health check + google.cloud.gcp_compute_http_health_check: + name: httphealthcheck-targethttpproxy + healthy_threshold: 10 + port: 8080 + timeout_sec: 2 + unhealthy_threshold: 5 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a backend service + google.cloud.gcp_compute_backend_service: + name: backendservice-targethttpproxy + backends: + - group: "{{ instancegroup.selfLink }}" + health_checks: + - "{{ healthcheck.selfLink }}" + enable_cdn: 'true' + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: backendservice + +- name: create a URL map + google.cloud.gcp_compute_url_map: + name: urlmap-targethttpproxy + default_service: "{{ backendservice }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: urlmap + +- name: create a region target HTTP proxy + google.cloud.gcp_compute_region_target_http_proxy: + name: test_object + region: us-central1 + url_map: "{{ urlmap }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' 
+creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +urlMap: + description: + - A reference to the RegionUrlMap resource that defines the mapping from URL to + the BackendService. + returned: success + type: dict +region: + description: + - The region where the regional proxy resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + url_map=dict(required=True, type='dict'), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#targetHttpProxy' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) + return fetch_resource(module, self_link(module), kind) + + +def update_fields(module, 
request, response): + if response.get('urlMap') != request.get('urlMap'): + url_map_update(module, request, response) + + +def url_map_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/targetHttpProxies/{name}/setUrlMap"]).format( + **module.params + ), + {u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink')}, + ) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#targetHttpProxy', + u'region': module.params.get('region'), + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetHttpProxies".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'id': response.get(u'id'), + u'name': response.get(u'name'), + u'urlMap': response.get(u'urlMap'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetHttpProxy') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = 
async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_http_proxy_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_http_proxy_info.py new file mode 100644 index 000000000..3ef0366ad --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_http_proxy_info.py @@ -0,0 +1,230 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_target_http_proxy_info +description: +- Gather info for GCP RegionTargetHttpProxy +short_description: Gather info for GCP RegionTargetHttpProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - The region where the regional proxy resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a region target HTTP proxy + gcp_compute_region_target_http_proxy_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. 
Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + urlMap: + description: + - A reference to the RegionUrlMap resource that defines the mapping from URL + to the BackendService. + returned: success + type: dict + region: + description: + - The region where the regional proxy resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetHttpProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: 
+ return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_https_proxy.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_https_proxy.py new file mode 100644 index 000000000..478563398 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_https_proxy.py @@ -0,0 +1,504 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_target_https_proxy +description: +- Represents a RegionTargetHttpsProxy resource, which is used by one or more forwarding + rules to route incoming HTTPS requests to a URL map. +short_description: Creates a GCP RegionTargetHttpsProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + ssl_certificates: + description: + - A list of RegionSslCertificate resources that are used to authenticate connections + between users and the load balancer. Currently, exactly one SSL certificate + must be specified. 
+ elements: dict + required: true + type: list + url_map: + description: + - A reference to the RegionUrlMap resource that defines the mapping from URL to + the RegionBackendService. + - 'This field represents a link to a RegionUrlMap resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_region_url_map task and then set this url_map field to "{{ + name-of-resource }}"' + required: true + type: dict + region: + description: + - The region where the regional proxy resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/regionTargetHttpsProxies)' +- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: instancegroup-targethttpsproxy + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancegroup + +- name: create a region health check + google.cloud.gcp_compute_region_health_check: + name: "{{ resource_name }}" + type: HTTPS + healthy_threshold: 10 + timeout_sec: 2 + unhealthy_threshold: 5 + region: us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a region backend service + google.cloud.gcp_compute_region_backend_service: + name: backendservice-targethttpsproxy + region: us-central1 + backends: + - group: "{{ instancegroup.selfLink }}" + healthchecks: + - "{{ healthcheck.selfLink }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + 
register: backendservice + +- name: create a region URL map + google.cloud.gcp_compute_region_url_map: + name: urlmap-targethttpsproxy + region: us-central1 + default_service: "{{ backendservice }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: urlmap + +- name: create a SSL certificate + google.cloud.gcp_compute_ssl_certificate: + name: sslcert-targethttpsproxy + description: A certificate for testing. Do not use this certificate in production + certificate: |- + -----BEGIN CERTIFICATE----- + MIICqjCCAk+gAwIBAgIJAIuJ+0352Kq4MAoGCCqGSM49BAMCMIGwMQswCQYDVQQG + EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjERMA8GA1UEBwwIS2lya2xhbmQxFTAT + BgNVBAoMDEdvb2dsZSwgSW5jLjEeMBwGA1UECwwVR29vZ2xlIENsb3VkIFBsYXRm + b3JtMR8wHQYDVQQDDBZ3d3cubXktc2VjdXJlLXNpdGUuY29tMSEwHwYJKoZIhvcN + AQkBFhJuZWxzb25hQGdvb2dsZS5jb20wHhcNMTcwNjI4MDQ1NjI2WhcNMjcwNjI2 + MDQ1NjI2WjCBsDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xETAP + BgNVBAcMCEtpcmtsYW5kMRUwEwYDVQQKDAxHb29nbGUsIEluYy4xHjAcBgNVBAsM + FUdvb2dsZSBDbG91ZCBQbGF0Zm9ybTEfMB0GA1UEAwwWd3d3Lm15LXNlY3VyZS1z + aXRlLmNvbTEhMB8GCSqGSIb3DQEJARYSbmVsc29uYUBnb29nbGUuY29tMFkwEwYH + KoZIzj0CAQYIKoZIzj0DAQcDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ + 4mzkzTv0dXyB750fOGN02HtkpBOZzzvUARTR10JQoSe2/5PIwaNQME4wHQYDVR0O + BBYEFKIQC3A2SDpxcdfn0YLKineDNq/BMB8GA1UdIwQYMBaAFKIQC3A2SDpxcdfn + 0YLKineDNq/BMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSQAwRgIhALs4vy+O + M3jcqgA4fSW/oKw6UJxp+M6a+nGMX+UJR3YgAiEAvvl39QRVAiv84hdoCuyON0lJ + zqGNhIPGq2ULqXKK8BY= + -----END CERTIFICATE----- + private_key: |- + -----BEGIN EC PRIVATE KEY----- + MHcCAQEEIObtRo8tkUqoMjeHhsOh2ouPpXCgBcP+EDxZCB/tws15oAoGCCqGSM49 + AwEHoUQDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ4mzkzTv0dXyB750f + OGN02HtkpBOZzzvUARTR10JQoSe2/5PIwQ== + -----END EC PRIVATE KEY----- + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: sslcert + +- name: 
create a region target HTTPS proxy + google.cloud.gcp_compute_region_target_https_proxy: + name: test_object + region: us-central1 + ssl_certificates: + - "{{ sslcert }}" + url_map: "{{ urlmap }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +sslCertificates: + description: + - A list of RegionSslCertificate resources that are used to authenticate connections + between users and the load balancer. Currently, exactly one SSL certificate must + be specified. + returned: success + type: list +urlMap: + description: + - A reference to the RegionUrlMap resource that defines the mapping from URL to + the RegionBackendService. + returned: success + type: dict +region: + description: + - The region where the regional proxy resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + ssl_certificates=dict(required=True, type='list', elements='dict'), + url_map=dict(required=True, type='dict'), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#targetHttpsProxy' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) + return 
fetch_resource(module, self_link(module), kind) + + +def update_fields(module, request, response): + if response.get('sslCertificates') != request.get('sslCertificates'): + ssl_certificates_update(module, request, response) + if response.get('urlMap') != request.get('urlMap'): + url_map_update(module, request, response) + + +def ssl_certificates_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/targetHttpsProxies/{name}/setSslCertificates"]).format( + **module.params + ), + {u'sslCertificates': replace_resource_dict(module.params.get('ssl_certificates', []), 'selfLink')}, + ) + + +def url_map_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/targetHttpsProxies/{name}/setUrlMap"]).format( + **module.params + ), + {u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink')}, + ) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#targetHttpsProxy', + u'region': module.params.get('region'), + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'sslCertificates': replace_resource_dict(module.params.get('ssl_certificates', []), 'selfLink'), + u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return 
"https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': module.params.get('description'), + u'id': response.get(u'id'), + u'name': module.params.get('name'), + u'sslCertificates': response.get(u'sslCertificates'), + u'urlMap': response.get(u'urlMap'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetHttpsProxy') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_https_proxy_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_https_proxy_info.py new file mode 100644 index 000000000..0af28904c --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_target_https_proxy_info.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_target_https_proxy_info +description: +- Gather info for GCP RegionTargetHttpsProxy +short_description: Gather info for GCP RegionTargetHttpsProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - The region where the regional proxy resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a region target HTTPS proxy + gcp_compute_region_target_https_proxy_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + sslCertificates: + description: + - A list of RegionSslCertificate resources that are used to authenticate connections + between users and the load balancer. Currently, exactly one SSL certificate + must be specified. + returned: success + type: list + urlMap: + description: + - A reference to the RegionUrlMap resource that defines the mapping from URL + to the RegionBackendService. + returned: success + type: dict + region: + description: + - The region where the regional proxy resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_url_map.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_url_map.py new file mode 100644 index 000000000..ac46b1ca8 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_url_map.py @@ -0,0 +1,4984 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_url_map +description: +- UrlMaps are used to route requests to a backend service based on rules that you + define for the host and path of an incoming URL. +short_description: Creates a GCP RegionUrlMap +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + default_service: + description: + - The full or partial URL of the defaultService resource to which traffic is directed + if none of the hostRules match. If defaultRouteAction is additionally specified, + advanced routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if defaultService is specified, defaultRouteAction + cannot contain any weightedBackendServices. Conversely, if routeAction specifies + any weightedBackendServices, service must not be specified. Only one of defaultService, + defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set. + - 'This field represents a link to a RegionBackendService resource in GCP. It + can be specified in two ways. 
First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_region_backend_service task and then set + this default_service field to "{{ name-of-resource }}"' + required: false + type: dict + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + host_rules: + description: + - The list of HostRules to use against the URL. + elements: dict + required: false + type: list + suboptions: + description: + description: + - An optional description of this HostRule. Provide this property when you + create the resource. + required: false + type: str + hosts: + description: + - The list of host patterns to match. They must be valid hostnames, except + * will match any string of ([a-z0-9-.]*). In that case, * must be the first + character and must be followed in the pattern by either - or . + elements: str + required: true + type: list + path_matcher: + description: + - The name of the PathMatcher to use to match the path portion of the URL + if the hostRule matches the URL's host portion. + required: true + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + path_matchers: + description: + - The list of named PathMatchers to use against the URL. + elements: dict + required: false + type: list + suboptions: + default_service: + description: + - A reference to a RegionBackendService resource. 
This will be used if none + of the pathRules defined by this PathMatcher is matched by the URL's path + portion. + - 'This field represents a link to a RegionBackendService resource in GCP. + It can be specified in two ways. First, you can place a dictionary with + key ''selfLink'' and value of your resource''s selfLink Alternatively, you + can add `register: name-of-resource` to a gcp_compute_region_backend_service + task and then set this default_service field to "{{ name-of-resource }}"' + required: false + type: dict + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - The name to which this PathMatcher is referred by the HostRule. + required: true + type: str + route_rules: + description: + - 'The list of ordered HTTP route rules. Use this list instead of pathRules + when advanced route matching and routing actions are desired. The order + of specifying routeRules matters: the first rule that matches will cause + its specified routing action to take effect. Within a given pathMatcher, + only one of pathRules or routeRules must be set. routeRules are not supported + in UrlMaps intended for External load balancers.' + elements: dict + required: false + type: list + suboptions: + priority: + description: + - For routeRules within a given pathMatcher, priority determines the order + in which load balancer will interpret routeRules. RouteRules are evaluated + in order of priority, from the lowest to highest number. The priority + of a rule decreases as its number increases (1, 2, 3, N+1). The first + rule that matches the request is applied. + - You cannot configure two or more routeRules with the same priority. + - Priority for each rule must be set to a number between 0 and 2147483647 + inclusive. + - Priority numbers can have gaps, which enable you to add or remove rules + in the future without affecting the rest of the rules. 
For example, + 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which + you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in + the future without any impact on existing rules. + required: true + type: int + service: + description: + - The region backend service resource to which traffic is directed if + this rule is matched. If routeAction is additionally specified, advanced + routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction + cannot contain any weightedBackendService s. Conversely, if routeAction + specifies any weightedBackendServices, service must not be specified. + Only one of urlRedirect, service or routeAction.weightedBackendService + must be set. + - 'This field represents a link to a RegionBackendService resource in + GCP. It can be specified in two ways. First, you can place a dictionary + with key ''selfLink'' and value of your resource''s selfLink Alternatively, + you can add `register: name-of-resource` to a gcp_compute_region_backend_service + task and then set this service field to "{{ name-of-resource }}"' + required: false + type: dict + header_action: + description: + - Specifies changes to request and response headers that need to take + effect for the selected backendService. The headerAction specified here + are applied before the matching pathMatchers[].headerAction and after + pathMatchers[].routeRules[].r outeAction.weightedBackendService.backendServiceWeightAction[].headerAction + . + required: false + type: dict + suboptions: + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. 
+ required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the header, + discarding any values that were set for that header. + required: true + type: bool + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed from + the request prior to forwarding the request to the backendService. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response back to + the client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the header, + discarding any values that were set for that header. + required: true + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed from + the response prior to sending the response back to the client. + elements: str + required: false + type: list + match_rules: + description: + - The rules for determining a match. + elements: dict + required: false + type: list + suboptions: + full_path_match: + description: + - For satisfying the matchRule condition, the path of the request + must exactly match the value specified in fullPathMatch after removing + any query parameters and anchor that may be part of the original + URL. FullPathMatch must be between 1 and 1024 characters. Only one + of prefixMatch, fullPathMatch or regexMatch must be specified. 
+ required: false + type: str + header_matches: + description: + - Specifies a list of header match criteria, all of which must match + corresponding headers in the request. + elements: dict + required: false + type: list + suboptions: + exact_match: + description: + - The value should exactly match contents of exactMatch. Only + one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch + or rangeMatch must be set. + required: false + type: str + header_name: + description: + - The name of the HTTP header to match. For matching against the + HTTP request's authority, use a headerMatch with the header + name ":authority". For matching a request's method, use the + headerName ":method". + required: true + type: str + invert_match: + description: + - If set to false, the headerMatch is considered a match if the + match criteria above are met. If set to true, the headerMatch + is considered a match if the match criteria above are NOT met. + Defaults to false. + required: false + default: 'false' + type: bool + prefix_match: + description: + - The value of the header must start with the contents of prefixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + required: false + type: str + present_match: + description: + - A header with the contents of headerName must exist. The match + takes place whether or not the request's header has a value + or not. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + required: false + type: bool + range_match: + description: + - The header value must be an integer and its value must be in + the range specified in rangeMatch. If the header does not contain + an integer, number or is empty, the match fails. For example + for a range [-5, 0] * -3 will match * 0 will not match * 0.25 + will not match * -3someString will not match. 
+ - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + required: false + type: dict + suboptions: + range_end: + description: + - The end of the range (exclusive). + required: true + type: int + range_start: + description: + - The start of the range (inclusive). + required: true + type: int + regex_match: + description: + - 'The value of the header must match the regular expression specified + in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript + For matching against a port specified in the HTTP request, use + a headerMatch with headerName set to PORT and a regular expression + that satisfies the RFC2616 Host header''s port specifier.' + - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + required: false + type: str + suffix_match: + description: + - The value of the header must end with the contents of suffixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + required: false + type: str + ignore_case: + description: + - Specifies that prefixMatch and fullPathMatch matches are case sensitive. + - Defaults to false. + required: false + default: 'false' + type: bool + metadata_filters: + description: + - Opaque filter criteria used by Loadbalancer to restrict routing + configuration to a limited set xDS compliant clients. In their xDS + requests to Loadbalancer, xDS clients present node metadata. If + a match takes place, the relevant routing configuration is made + available to those proxies. For each metadataFilter in this list, + if its filterMatchCriteria is set to MATCH_ANY, at least one of + the filterLabels must match the corresponding label provided in + the metadata. If its filterMatchCriteria is set to MATCH_ALL, then + all of its filterLabels must match with corresponding labels in + the provided metadata. 
metadataFilters specified here can override + those specified in ForwardingRule that refers to this UrlMap. metadataFilters + only applies to Loadbalancers that have their loadBalancingScheme + set to INTERNAL_SELF_MANAGED. + elements: dict + required: false + type: list + suboptions: + filter_labels: + description: + - The list of label value pairs that must match labels in the + provided metadata based on filterMatchCriteria This list must + not be empty and can have at most 64 entries. + elements: dict + required: true + type: list + suboptions: + name: + description: + - Name of metadata label. The name can have a maximum length + of 1024 characters and must be at least 1 character long. + required: true + type: str + value: + description: + - The value of the label must match the specified value. value + can have a maximum length of 1024 characters. + required: true + type: str + filter_match_criteria: + description: + - 'Specifies how individual filterLabel matches within the list + of filterLabels contribute towards the overall metadataFilter + match. Supported values are: * MATCH_ANY: At least one of the + filterLabels must have a matching label in the provided metadata.' + - "* MATCH_ALL: All filterLabels must have matching labels in + the provided metadata." + - 'Some valid choices include: "MATCH_ALL", "MATCH_ANY"' + required: true + type: str + prefix_match: + description: + - For satisfying the matchRule condition, the request's path must + begin with the specified prefixMatch. prefixMatch must begin with + a /. The value must be between 1 and 1024 characters. Only one of + prefixMatch, fullPathMatch or regexMatch must be specified. + required: false + type: str + query_parameter_matches: + description: + - Specifies a list of query parameter match criteria, all of which + must match corresponding query parameters in the request. 
+ elements: dict + required: false + type: list + suboptions: + exact_match: + description: + - The queryParameterMatch matches if the value of the parameter + exactly matches the contents of exactMatch. Only one of presentMatch, + exactMatch and regexMatch must be set. + required: false + type: str + name: + description: + - The name of the query parameter to match. The query parameter + must exist in the request, in the absence of which the request + match fails. + required: true + type: str + present_match: + description: + - Specifies that the queryParameterMatch matches if the request + contains the query parameter, irrespective of whether the parameter + has a value or not. Only one of presentMatch, exactMatch and + regexMatch must be set. + required: false + type: bool + regex_match: + description: + - The queryParameterMatch matches if the value of the parameter + matches the regular expression specified by regexMatch. For + the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of presentMatch, exactMatch and regexMatch must be + set. + required: false + type: str + regex_match: + description: + - For satisfying the matchRule condition, the path of the request + must satisfy the regular expression specified in regexMatch after + removing any query parameters and anchor supplied with the original + URL. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. + required: false + type: str + route_action: + description: + - In response to a matching matchRule, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction specifies + any weightedBackendServices, service must not be set. Conversely if + service is set, routeAction cannot contain any weightedBackendServices. 
+ Only one of routeAction or urlRedirect must be set. + required: false + type: dict + suboptions: + cors_policy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing + . + required: false + type: dict + suboptions: + allow_credentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This translates + to the Access- Control-Allow-Credentials header. Defaults to + false. + required: false + default: 'false' + type: bool + allow_headers: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + elements: str + required: false + type: list + allow_methods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + elements: str + required: false + type: list + allow_origin_regexes: + description: + - Specifies the regular expression patterns that match allowed + origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + elements: str + required: false + type: list + allow_origins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + elements: str + required: false + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + - which indicates that the CORS policy is in effect. Defaults + to false. + required: false + default: 'false' + type: bool + expose_headers: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. + elements: str + required: false + type: list + max_age: + description: + - Specifies how long the results of a preflight request can be + cached. 
This translates to the content for the Access-Control-Max-Age + header. + required: false + type: int + fault_injection_policy: + description: + - The specification for fault injection introduced into traffic to + test the resiliency of clients to backend service failure. As part + of fault injection, when clients send requests to a backend service, + delays can be introduced by Loadbalancer on a percentage of requests + before sending those request to the backend service. Similarly requests + from clients can be aborted by the Loadbalancer for a percentage + of requests. timeout and retry_policy will be ignored by clients + that are configured with a fault_injection_policy. + required: false + type: dict + suboptions: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + required: false + type: dict + suboptions: + http_status: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + required: false + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The value + must be between 0.0 and 100.0 inclusive. + required: false + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + required: false + type: dict + suboptions: + fixed_delay: + description: + - Specifies the value of the fixed delay interval. + required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. 
+ required: true + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + required: false + type: str + request_mirror_policy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. Loadbalancer + does not wait for responses from the shadow service. Prior to sending + traffic to the shadow service, the host / authority header is suffixed + with -shadow. + required: false + type: dict + suboptions: + backend_service: + description: + - The RegionBackendService resource being mirrored to. + - 'This field represents a link to a RegionBackendService resource + in GCP. It can be specified in two ways. First, you can place + a dictionary with key ''selfLink'' and value of your resource''s + selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_region_backend_service task and then set this + backend_service field to "{{ name-of-resource }}"' + required: true + type: dict + retry_policy: + description: + - Specifies the retry policy associated with this route. + required: false + type: dict + suboptions: + num_retries: + description: + - Specifies the allowed number of retries. This number must be > + 0. + required: true + type: int + per_try_timeout: + description: + - Specifies a non-zero timeout per retry attempt. + required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. Must + be from 0 to 999,999,999 inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 + to 315,576,000,000 inclusive. 
+ required: true + type: str + retry_conditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: * 5xx: Loadbalancer will attempt a retry if + the backend service responds with any 5xx response code, or + if the backend service does not respond at all, example: disconnects, + reset, read timeout, connection failure, and refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx + response codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend service + resets the stream with a REFUSED_STREAM error code. This reset + type indicates that it is safe to retry." + - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header is + set to resource-exhausted * unavailable: Loadbalancer will retry + if the gRPC status code in the response header is set to unavailable + ." + elements: str + required: false + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. 
+ Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + required: true + type: str + url_rewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the + request to the matched service . + required: false + type: dict + suboptions: + host_rewrite: + description: + - Prior to forwarding the request to the selected service, the + request's host header is replaced with contents of hostRewrite. + The value must be between 1 and 255 characters. + required: false + type: str + path_prefix_rewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + The value must be between 1 and 1024 characters. + required: false + type: str + weighted_backend_services: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. The weights determine the fraction of traffic that + flows to their corresponding backend service. If all traffic needs + to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + elements: dict + required: false + type: list + suboptions: + backend_service: + description: + - The default RegionBackendService resource. Before forwarding + the request to backendService, the loadbalancer applies any + relevant headerActions specified as part of this backendServiceWeight. 
+ - 'This field represents a link to a RegionBackendService resource + in GCP. It can be specified in two ways. First, you can place + a dictionary with key ''selfLink'' and value of your resource''s + selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_region_backend_service task and then set this + backend_service field to "{{ name-of-resource }}"' + required: true + type: dict + header_action: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + required: false + type: dict + suboptions: + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue is + set for the header, discarding any values that were + set for that header. + required: true + type: bool + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the + backendService. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response + back to the client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. 
+ required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue is + set for the header, discarding any values that were + set for that header. + required: true + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to + the client. + elements: str + required: false + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . The selection of a backend service is determined only for + new traffic. Once a user's request has been directed to a backendService, + subsequent requests will be sent to the same backendService + as determined by the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + required: true + type: int + url_redirect: + description: + - When this rule is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction + must not be set. + required: false + type: dict + suboptions: + host_redirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. The value must be between + 1 and 255 characters. + required: false + type: str + https_redirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. + - If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps + used in TargetHttpProxys. Setting this true for TargetHttpsProxy + is not permitted. The default is set to false. 
+ required: false + default: 'false' + type: bool + path_redirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither + is supplied, the path of the original request will be used for the + redirect. + - The value must be between 1 and 1024 characters. + required: false + type: str + prefix_redirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. prefixRedirect cannot be supplied together with pathRedirect. + Supply one alone or neither. If neither is supplied, the path of + the original request will be used for the redirect. The value must + be between 1 and 1024 characters. + required: false + type: str + redirect_response_code: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported + values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value + and corresponds to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." + - 'Some valid choices include: "FOUND", "MOVED_PERMANENTLY_DEFAULT", + "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"' + required: false + type: str + strip_query: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. The default value + is false. + required: false + default: 'false' + type: bool + path_rules: + description: + - 'The list of path rules. 
Use this list instead of routeRules when routing + based on simple path matching is all that''s required. The order by which + path rules are specified does not matter. Matches are always done on the + longest-path-first basis. For example: a pathRule with a path /a/b/c/* will + match before /a/b/* irrespective of the order in which those paths appear + in this list. Within a given pathMatcher, only one of pathRules or routeRules + must be set.' + elements: dict + required: false + type: list + suboptions: + service: + description: + - The region backend service resource to which traffic is directed if + this rule is matched. If routeAction is additionally specified, advanced + routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction + cannot contain any weightedBackendService s. Conversely, if routeAction + specifies any weightedBackendServices, service must not be specified. + Only one of urlRedirect, service or routeAction.weightedBackendService + must be set. + - 'This field represents a link to a RegionBackendService resource in + GCP. It can be specified in two ways. First, you can place a dictionary + with key ''selfLink'' and value of your resource''s selfLink Alternatively, + you can add `register: name-of-resource` to a gcp_compute_region_backend_service + task and then set this service field to "{{ name-of-resource }}"' + required: false + type: dict + paths: + description: + - 'The list of path patterns to match. Each must start with / and the + only place a \\ * is allowed is at the end following a /. The string fed + to the path matcher does not include any text after the first ? or #, + and those chars are not allowed here.' + elements: str + required: true + type: list + route_action: + description: + - In response to a matching path, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. 
prior + to forwarding the request to the selected backend. If routeAction specifies + any weightedBackendServices, service must not be set. Conversely if + service is set, routeAction cannot contain any weightedBackendServices. + Only one of routeAction or urlRedirect must be set. + required: false + type: dict + suboptions: + cors_policy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing + . + required: false + type: dict + suboptions: + allow_credentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This translates + to the Access- Control-Allow-Credentials header. Defaults to + false. + required: false + default: 'false' + type: bool + allow_headers: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + elements: str + required: false + type: list + allow_methods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + elements: str + required: false + type: list + allow_origin_regexes: + description: + - Specifies the regular expression patterns that match allowed + origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + elements: str + required: false + type: list + allow_origins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + elements: str + required: false + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + required: true + type: bool + expose_headers: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. 
+ elements: str + required: false + type: list + max_age: + description: + - Specifies how long the results of a preflight request can be + cached. This translates to the content for the Access-Control-Max-Age + header. + required: false + type: int + fault_injection_policy: + description: + - The specification for fault injection introduced into traffic to + test the resiliency of clients to backend service failure. As part + of fault injection, when clients send requests to a backend service, + delays can be introduced by Loadbalancer on a percentage of requests + before sending those request to the backend service. Similarly requests + from clients can be aborted by the Loadbalancer for a percentage + of requests. timeout and retry_policy will be ignored by clients + that are configured with a fault_injection_policy. + required: false + type: dict + suboptions: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + required: false + type: dict + suboptions: + http_status: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + required: true + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The value + must be between 0.0 and 100.0 inclusive. + required: true + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + required: false + type: dict + suboptions: + fixed_delay: + description: + - Specifies the value of the fixed delay interval. + required: true + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. 
+ required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + required: true + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + required: true + type: str + request_mirror_policy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. Loadbalancer + does not wait for responses from the shadow service. Prior to sending + traffic to the shadow service, the host / authority header is suffixed + with -shadow. + required: false + type: dict + suboptions: + backend_service: + description: + - The RegionBackendService resource being mirrored to. + - 'This field represents a link to a RegionBackendService resource + in GCP. It can be specified in two ways. First, you can place + a dictionary with key ''selfLink'' and value of your resource''s + selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_region_backend_service task and then set this + backend_service field to "{{ name-of-resource }}"' + required: true + type: dict + retry_policy: + description: + - Specifies the retry policy associated with this route. + required: false + type: dict + suboptions: + num_retries: + description: + - Specifies the allowed number retries. This number must be > + 0. + required: false + type: int + per_try_timeout: + description: + - Specifies a non-zero timeout per retry attempt. + required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. Must + be from 0 to 999,999,999 inclusive. 
+ required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 + to 315,576,000,000 inclusive. + required: true + type: str + retry_conditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: - 5xx: Loadbalancer will attempt a retry if + the backend service responds with any 5xx response code, or + if the backend service does not respond at all, example: disconnects, + reset, read timeout, connection failure, and refused streams.' + - "- gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "- connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "- retriable-4xx: Loadbalancer will retry for retriable 4xx + response codes." + - Currently the only retriable error supported is 409. + - "- refused-stream: Loadbalancer will retry if the backend service + resets the stream with a REFUSED_STREAM error code. This reset + type indicates that it is safe to retry." + - "- cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled - deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded - resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header is + set to resource-exhausted - unavailable: Loadbalancer will retry + if the gRPC status code in the response header is set to unavailable + ." + elements: str + required: false + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request is has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. 
+ required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + required: true + type: str + url_rewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the + request to the matched service . + required: false + type: dict + suboptions: + host_rewrite: + description: + - Prior to forwarding the request to the selected service, the + request's host header is replaced with contents of hostRewrite. + The value must be between 1 and 255 characters. + required: false + type: str + path_prefix_rewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + The value must be between 1 and 1024 characters. + required: false + type: str + weighted_backend_services: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. The weights determine the fraction of traffic that + flows to their corresponding backend service. If all traffic needs + to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + elements: dict + required: false + type: list + suboptions: + backend_service: + description: + - The default RegionBackendService resource. 
Before forwarding + the request to backendService, the loadbalancer applies any + relevant headerActions specified as part of this backendServiceWeight. + - 'This field represents a link to a RegionBackendService resource + in GCP. It can be specified in two ways. First, you can place + a dictionary with key ''selfLink'' and value of your resource''s + selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_region_backend_service task and then set this + backend_service field to "{{ name-of-resource }}"' + required: true + type: dict + header_action: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + required: false + type: dict + suboptions: + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue is + set for the header, discarding any values that were + set for that header. + required: true + type: bool + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the + backendService. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response + back to the client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. 
+ required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue is + set for the header, discarding any values that were + set for that header. + required: true + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to + the client. + elements: str + required: false + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . The selection of a backend service is determined only for + new traffic. Once a user's request has been directed to a backendService, + subsequent requests will be sent to the same backendService + as determined by the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + required: true + type: int + url_redirect: + description: + - When a path pattern is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction + must not be set. + required: false + type: dict + suboptions: + host_redirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. The value must be between + 1 and 255 characters. + required: false + type: str + https_redirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. + - If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps + used in TargetHttpProxys. Setting this true for TargetHttpsProxy + is not permitted. The default is set to false. 
+ required: false + default: 'false' + type: bool + path_redirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither + is supplied, the path of the original request will be used for the + redirect. + - The value must be between 1 and 1024 characters. + required: false + type: str + prefix_redirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. prefixRedirect cannot be supplied together with pathRedirect. + Supply one alone or neither. If neither is supplied, the path of + the original request will be used for the redirect. The value must + be between 1 and 1024 characters. + required: false + type: str + redirect_response_code: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported + values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value + and corresponds to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." + - 'Some valid choices include: "FOUND", "MOVED_PERMANENTLY_DEFAULT", + "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"' + required: false + type: str + strip_query: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. 
+ required: false + default: 'false' + type: bool + default_url_redirect: + description: + - When none of the specified hostRules match, the request is redirected to + a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, + defaultService or defaultRouteAction must not be set. + required: false + type: dict + suboptions: + host_redirect: + description: + - The host that will be used in the redirect response instead of the one + that was supplied in the request. The value must be between 1 and 255 + characters. + required: false + type: str + https_redirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps used + in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. + The default is set to false. + required: false + default: 'false' + type: bool + path_redirect: + description: + - The path that will be used in the redirect response instead of the one + that was supplied in the request. pathRedirect cannot be supplied together + with prefixRedirect. Supply one alone or neither. If neither is supplied, + the path of the original request will be used for the redirect. The + value must be between 1 and 1024 characters. + required: false + type: str + prefix_redirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply + one alone or neither. If neither is supplied, the path of the original + request will be used for the redirect. The value must be between 1 and + 1024 characters. + required: false + type: str + redirect_response_code: + description: + - 'The HTTP Status code to use for this RedirectAction. 
Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + - 'Some valid choices include: "FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", + "SEE_OTHER", "TEMPORARY_REDIRECT"' + required: false + type: str + strip_query: + description: + - If set to true, any accompanying query portion of the original URL is + removed prior to redirecting the request. If set to false, the query + portion of the original URL is retained. + required: false + default: 'false' + type: bool + tests: + description: + - The list of expected URL mappings. Requests to update this UrlMap will succeed + only if all of the test cases pass. + elements: dict + required: false + type: list + suboptions: + description: + description: + - Description of this test case. + required: false + type: str + host: + description: + - Host portion of the URL. + required: true + type: str + path: + description: + - Path portion of the URL. + required: true + type: str + service: + description: + - A reference to expected RegionBackendService resource the given URL should + be mapped to. + - 'This field represents a link to a RegionBackendService resource in GCP. + It can be specified in two ways. First, you can place a dictionary with + key ''selfLink'' and value of your resource''s selfLink Alternatively, you + can add `register: name-of-resource` to a gcp_compute_region_backend_service + task and then set this service field to "{{ name-of-resource }}"' + required: true + type: dict + default_url_redirect: + description: + - When none of the specified hostRules match, the request is redirected to a URL + specified by defaultUrlRedirect. 
If defaultUrlRedirect is specified, defaultService + or defaultRouteAction must not be set. + required: false + type: dict + suboptions: + host_redirect: + description: + - The host that will be used in the redirect response instead of the one that + was supplied in the request. The value must be between 1 and 255 characters. + required: false + type: str + https_redirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain the + same as that of the request. This must only be set for UrlMaps used in TargetHttpProxys. + Setting this true for TargetHttpsProxy is not permitted. The default is + set to false. + required: false + default: 'false' + type: bool + path_redirect: + description: + - The path that will be used in the redirect response instead of the one that + was supplied in the request. pathRedirect cannot be supplied together with + prefixRedirect. Supply one alone or neither. If neither is supplied, the + path of the original request will be used for the redirect. The value must + be between 1 and 1024 characters. + required: false + type: str + prefix_redirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply one + alone or neither. If neither is supplied, the path of the original request + will be used for the redirect. The value must be between 1 and 1024 characters. + required: false + type: str + redirect_response_code: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values are: + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. 
In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + - 'Some valid choices include: "FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", + "SEE_OTHER", "TEMPORARY_REDIRECT"' + required: false + type: str + strip_query: + description: + - If set to true, any accompanying query portion of the original URL is removed + prior to redirecting the request. If set to false, the query portion of + the original URL is retained. + required: false + default: 'false' + type: bool + region: + description: + - A reference to the region where the url map resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +''' + +EXAMPLES = ''' +- name: create a health check + google.cloud.gcp_compute_health_check: + name: "{{ resource_name }}" + type: HTTP + http_health_check: + port: 80 + check_interval_sec: 1 + timeout_sec: 1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a region backend service + google.cloud.gcp_compute_region_backend_service: + name: "{{ resource_name }}" + region: us-central1 + health_checks: + - "{{ healthcheck.selfLink }}" + protocol: HTTP + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: backendservice + +- name: create a region URL map + google.cloud.gcp_compute_region_url_map: + name: test_object + region: us-central1 + default_service: "{{ backendservice }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +defaultService: + description: + - The full or partial URL of the defaultService resource to which traffic is directed + if none of the hostRules match. If defaultRouteAction is additionally specified, + advanced routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if defaultService is specified, defaultRouteAction + cannot contain any weightedBackendServices. Conversely, if routeAction specifies + any weightedBackendServices, service must not be specified. Only one of defaultService, + defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set. + returned: success + type: dict +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. 
+ returned: success + type: str +hostRules: + description: + - The list of HostRules to use against the URL. + returned: success + type: complex + contains: + description: + description: + - An optional description of this HostRule. Provide this property when you create + the resource. + returned: success + type: str + hosts: + description: + - The list of host patterns to match. They must be valid hostnames, except * + will match any string of ([a-z0-9-.]*). In that case, * must be the first + character and must be followed in the pattern by either - or . + returned: success + type: list + pathMatcher: + description: + - The name of the PathMatcher to use to match the path portion of the URL if + the hostRule matches the URL's host portion. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +fingerprint: + description: + - Fingerprint of this resource. This field is used internally during updates of + this resource. + returned: success + type: str +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +pathMatchers: + description: + - The list of named PathMatchers to use against the URL. + returned: success + type: complex + contains: + defaultService: + description: + - A reference to a RegionBackendService resource. This will be used if none + of the pathRules defined by this PathMatcher is matched by the URL's path + portion. + returned: success + type: dict + description: + description: + - An optional description of this resource. 
+ returned: success + type: str + name: + description: + - The name to which this PathMatcher is referred by the HostRule. + returned: success + type: str + routeRules: + description: + - 'The list of ordered HTTP route rules. Use this list instead of pathRules + when advanced route matching and routing actions are desired. The order of + specifying routeRules matters: the first rule that matches will cause its + specified routing action to take effect. Within a given pathMatcher, only + one of pathRules or routeRules must be set. routeRules are not supported in + UrlMaps intended for External load balancers.' + returned: success + type: complex + contains: + priority: + description: + - For routeRules within a given pathMatcher, priority determines the order + in which load balancer will interpret routeRules. RouteRules are evaluated + in order of priority, from the lowest to highest number. The priority + of a rule decreases as its number increases (1, 2, 3, N+1). The first + rule that matches the request is applied. + - You cannot configure two or more routeRules with the same priority. + - Priority for each rule must be set to a number between 0 and 2147483647 + inclusive. + - Priority numbers can have gaps, which enable you to add or remove rules + in the future without affecting the rest of the rules. For example, 1, + 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you + could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future + without any impact on existing rules. + returned: success + type: int + service: + description: + - The region backend service resource to which traffic is directed if this + rule is matched. If routeAction is additionally specified, advanced routing + actions like URL Rewrites, etc. take effect prior to sending the request + to the backend. However, if service is specified, routeAction cannot contain + any weightedBackendService s. 
Conversely, if routeAction specifies any + weightedBackendServices, service must not be specified. Only one of urlRedirect, + service or routeAction.weightedBackendService must be set. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need to take effect + for the selected backendService. The headerAction specified here are applied + before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].r + outeAction.weightedBackendService.backendServiceWeightAction[].headerAction + . + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back to + the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. 
If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + response prior to sending the response back to the client. + returned: success + type: list + matchRules: + description: + - The rules for determining a match. + returned: success + type: complex + contains: + fullPathMatch: + description: + - For satisfying the matchRule condition, the path of the request must + exactly match the value specified in fullPathMatch after removing + any query parameters and anchor that may be part of the original URL. + FullPathMatch must be between 1 and 1024 characters. Only one of prefixMatch, + fullPathMatch or regexMatch must be specified. + returned: success + type: str + headerMatches: + description: + - Specifies a list of header match criteria, all of which must match + corresponding headers in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The value should exactly match contents of exactMatch. Only one + of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch + or rangeMatch must be set. + returned: success + type: str + headerName: + description: + - The name of the HTTP header to match. For matching against the + HTTP request's authority, use a headerMatch with the header name + ":authority". For matching a request's method, use the headerName + ":method". + returned: success + type: str + invertMatch: + description: + - If set to false, the headerMatch is considered a match if the + match criteria above are met. If set to true, the headerMatch + is considered a match if the match criteria above are NOT met. + Defaults to false. + returned: success + type: bool + prefixMatch: + description: + - The value of the header must start with the contents of prefixMatch. 
+ Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + presentMatch: + description: + - A header with the contents of headerName must exist. The match + takes place whether or not the request's header has a value or + not. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: bool + rangeMatch: + description: + - The header value must be an integer and its value must be in the + range specified in rangeMatch. If the header does not contain + an integer, number or is empty, the match fails. For example for + a range [-5, 0] * -3 will match * 0 will not match * 0.25 will + not match * -3someString will not match. + - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: complex + contains: + rangeEnd: + description: + - The end of the range (exclusive). + returned: success + type: int + rangeStart: + description: + - The start of the range (inclusive). + returned: success + type: int + regexMatch: + description: + - 'The value of the header must match the regular expression specified + in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript + For matching against a port specified in the HTTP request, use + a headerMatch with headerName set to PORT and a regular expression + that satisfies the RFC2616 Host header''s port specifier.' + - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + suffixMatch: + description: + - The value of the header must end with the contents of suffixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. 
+ returned: success + type: str + ignoreCase: + description: + - Specifies that prefixMatch and fullPathMatch matches are case sensitive. + - Defaults to false. + returned: success + type: bool + metadataFilters: + description: + - Opaque filter criteria used by Loadbalancer to restrict routing configuration + to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, + xDS clients present node metadata. If a match takes place, the relevant + routing configuration is made available to those proxies. For each + metadataFilter in this list, if its filterMatchCriteria is set to + MATCH_ANY, at least one of the filterLabels must match the corresponding + label provided in the metadata. If its filterMatchCriteria is set + to MATCH_ALL, then all of its filterLabels must match with corresponding + labels in the provided metadata. metadataFilters specified here + override those specified in ForwardingRule that refers to this + UrlMap. metadataFilters only applies to Loadbalancers that have their + loadBalancingScheme set to INTERNAL_SELF_MANAGED. + returned: success + type: complex + contains: + filterLabels: + description: + - The list of label value pairs that must match labels in the provided + metadata based on filterMatchCriteria. This list must not be empty + and can have at the most 64 entries. + returned: success + type: complex + contains: + name: + description: + - Name of metadata label. The name can have a maximum length + of 1024 characters and must be at least 1 character long. + returned: success + type: str + value: + description: + - The value of the label must match the specified value. value + can have a maximum length of 1024 characters. + returned: success + type: str + filterMatchCriteria: + description: + - 'Specifies how individual filterLabel matches within the list + of filterLabels contribute towards the overall metadataFilter + match. 
Supported values are: * MATCH_ANY: At least one of the + filterLabels must have a matching label in the provided metadata.' + - "* MATCH_ALL: All filterLabels must have matching labels in the + provided metadata." + returned: success + type: str + prefixMatch: + description: + - For satisfying the matchRule condition, the request's path must begin + with the specified prefixMatch. prefixMatch must begin with a /. The + value must be between 1 and 1024 characters. Only one of prefixMatch, + fullPathMatch or regexMatch must be specified. + returned: success + type: str + queryParameterMatches: + description: + - Specifies a list of query parameter match criteria, all of which must + match corresponding query parameters in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The queryParameterMatch matches if the value of the parameter + exactly matches the contents of exactMatch. Only one of presentMatch, + exactMatch and regexMatch must be set. + returned: success + type: str + name: + description: + - The name of the query parameter to match. The query parameter + must exist in the request, in the absence of which the request + match fails. + returned: success + type: str + presentMatch: + description: + - Specifies that the queryParameterMatch matches if the request + contains the query parameter, irrespective of whether the parameter + has a value or not. Only one of presentMatch, exactMatch and regexMatch + must be set. + returned: success + type: bool + regexMatch: + description: + - The queryParameterMatch matches if the value of the parameter + matches the regular expression specified by regexMatch. For the + regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of presentMatch, exactMatch and regexMatch must be set. 
+ returned: success + type: str + regexMatch: + description: + - For satisfying the matchRule condition, the path of the request must + satisfy the regular expression specified in regexMatch after removing + any query parameters and anchor supplied with the original URL. For + regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. + returned: success + type: str + routeAction: + description: + - In response to a matching matchRule, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction specifies + any weightedBackendServices, service must not be set. Conversely if service + is set, routeAction cannot contain any weightedBackendServices. Only one + of routeAction or urlRedirect must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing . + returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This translates + to the Access- Control-Allow-Credentials header. Defaults to false. + returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed origins. 
+ For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + - which indicates that the CORS policy is in effect. Defaults to + false. + returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can be cached. + This translates to the content for the Access-Control-Max-Age + header. + returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic to test + the resiliency of clients to backend service failure. As part of fault + injection, when clients send requests to a backend service, delays + can be introduced by Loadbalancer on a percentage of requests before + sending those request to the backend service. Similarly requests from + clients can be aborted by the Loadbalancer for a percentage of requests. + timeout and retry_policy will be ignored by clients that are configured + with a fault_injection_policy. + returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. 
+ returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The value + must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. Loadbalancer + does not wait for responses from the shadow service. Prior to sending + traffic to the shadow service, the host / authority header is suffixed + with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The RegionBackendService resource being mirrored to. + returned: success + type: dict + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + numRetries: + description: + - Specifies the allowed number of retries. 
This number must be > 0. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + returned: success + type: str + retryConditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: * 5xx: Loadbalancer will attempt a retry if + the backend service responds with any 5xx response code, or if + the backend service does not respond at all, example: disconnects, + reset, read timeout, connection failure, and refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx response + codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend service + resets the stream with a REFUSED_STREAM error code. This reset + type indicates that it is safe to retry." 
+ - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header is set + to resource-exhausted * unavailable: Loadbalancer will retry if + the gRPC status code in the response header is set to unavailable + ." + returned: success + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: str + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the + request to the matched service . + returned: success + type: complex + contains: + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. The value + must be between 1 and 255 characters. + returned: success + type: str + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + The value must be between 1 and 1024 characters. 
+ returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. The weights determine the fraction of traffic that flows + to their corresponding backend service. If all traffic needs to go + to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are applied + depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default RegionBackendService resource. Before forwarding the + request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need to + take effect for the selected backendService. headerAction specified + here take effect before headerAction in the enclosing HttpRouteRule, + PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the + request to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for + the header, discarding any values that were set for that + header. 
+ returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for + the header, discarding any values that were set for that + header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to the + client. + returned: success + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . The selection of a backend service is determined only for new + traffic. Once a user's request has been directed to a backendService, + subsequent requests will be sent to the same backendService as + determined by the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + urlRedirect: + description: + - When this rule is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction must + not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. 
The value must be between 1 + and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to + https. + - If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps + used in TargetHttpProxys. Setting this true for TargetHttpsProxy is + not permitted. The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither + is supplied, the path of the original request will be used for the + redirect. + - The value must be between 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. prefixRedirect cannot be supplied together with pathRedirect. + Supply one alone or neither. If neither is supplied, the path of the + original request will be used for the redirect. The value must be + between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." 
+ returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. The default value is + false. + returned: success + type: bool + pathRules: + description: + - 'The list of path rules. Use this list instead of routeRules when routing + based on simple path matching is all that''s required. The order by which + path rules are specified does not matter. Matches are always done on the longest-path-first + basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* + irrespective of the order in which those paths appear in this list. Within + a given pathMatcher, only one of pathRules or routeRules must be set.' + returned: success + type: complex + contains: + service: + description: + - The region backend service resource to which traffic is directed if this + rule is matched. If routeAction is additionally specified, advanced routing + actions like URL Rewrites, etc. take effect prior to sending the request + to the backend. However, if service is specified, routeAction cannot contain + any weightedBackendService s. Conversely, if routeAction specifies any + weightedBackendServices, service must not be specified. Only one of urlRedirect, + service or routeAction.weightedBackendService must be set. + returned: success + type: dict + paths: + description: + - 'The list of path patterns to match. Each must start with / and the only + place a \\* is allowed is at the end following a /. The string fed to the + path matcher does not include any text after the first ? or #, and those + chars are not allowed here.' + returned: success + type: list + routeAction: + description: + - In response to a matching path, the load balancer performs advanced routing + actions like URL rewrites, header transformations, etc. prior to forwarding + the request to the selected backend. 
If routeAction specifies any weightedBackendServices, + service must not be set. Conversely if service is set, routeAction cannot + contain any weightedBackendServices. Only one of routeAction or urlRedirect + must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing . + returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This translates + to the Access- Control-Allow-Credentials header. Defaults to false. + returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed origins. + For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can be cached. + This translates to the content for the Access-Control-Max-Age + header. 
+ returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic to test + the resiliency of clients to backend service failure. As part of fault + injection, when clients send requests to a backend service, delays + can be introduced by Loadbalancer on a percentage of requests before + sending those request to the backend service. Similarly requests from + clients can be aborted by the Loadbalancer for a percentage of requests. + timeout and retry_policy will be ignored by clients that are configured + with a fault_injection_policy. + returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The value + must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. 
+ returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. Loadbalancer + does not wait for responses from the shadow service. Prior to sending + traffic to the shadow service, the host / authority header is suffixed + with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The RegionBackendService resource being mirrored to. + returned: success + type: dict + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + numRetries: + description: + - Specifies the allowed number of retries. This number must be > 0. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + returned: success + type: str + retryConditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: - 5xx: Loadbalancer will attempt a retry if + the backend service responds with any 5xx response code, or if + the backend service does not respond at all, example: disconnects, + reset, read timeout, connection failure, and refused streams.' 
+ - "- gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "- connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "- retriable-4xx: Loadbalancer will retry for retriable 4xx response + codes." + - Currently the only retriable error supported is 409. + - "- refused-stream: Loadbalancer will retry if the backend service + resets the stream with a REFUSED_STREAM error code. This reset + type indicates that it is safe to retry." + - "- cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled - deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded - resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header is set + to resource-exhausted - unavailable: Loadbalancer will retry if + the gRPC status code in the response header is set to unavailable + ." + returned: success + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: str + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the + request to the matched service . 
+ returned: success + type: complex + contains: + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. The value + must be between 1 and 255 characters. + returned: success + type: str + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + The value must be between 1 and 1024 characters. + returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. The weights determine the fraction of traffic that flows + to their corresponding backend service. If all traffic needs to go + to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are applied + depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default RegionBackendService resource. Before forwarding the + request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need to + take effect for the selected backendService. headerAction specified + here take effect before headerAction in the enclosing HttpRouteRule, + PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the + request to the backendService. 
+ returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for + the header, discarding any values that were set for that + header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for + the header, discarding any values that were set for that + header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to the + client. + returned: success + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . The selection of a backend service is determined only for new + traffic. Once a user's request has been directed to a backendService, + subsequent requests will be sent to the same backendService as + determined by the BackendService's session affinity policy. + - The value must be between 0 and 1000 . 
+ returned: success + type: int + urlRedirect: + description: + - When a path pattern is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction must + not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. The value must be between 1 + and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to + https. + - If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps + used in TargetHttpProxys. Setting this true for TargetHttpsProxy is + not permitted. The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither + is supplied, the path of the original request will be used for the + redirect. + - The value must be between 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. prefixRedirect cannot be supplied together with pathRedirect. + Supply one alone or neither. If neither is supplied, the path of the + original request will be used for the redirect. The value must be + between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' 
+ - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. + returned: success + type: bool + defaultUrlRedirect: + description: + - When none of the specified hostRules match, the request is redirected to a + URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService + or defaultRouteAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the one + that was supplied in the request. The value must be between 1 and 255 + characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps used + in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. + The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the one + that was supplied in the request. pathRedirect cannot be supplied together + with prefixRedirect. Supply one alone or neither. If neither is supplied, + the path of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. 
+ returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply one + alone or neither. If neither is supplied, the path of the original request + will be used for the redirect. The value must be between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL is + removed prior to redirecting the request. If set to false, the query portion + of the original URL is retained. + returned: success + type: bool +tests: + description: + - The list of expected URL mappings. Requests to update this UrlMap will succeed + only if all of the test cases pass. + returned: success + type: complex + contains: + description: + description: + - Description of this test case. + returned: success + type: str + host: + description: + - Host portion of the URL. + returned: success + type: str + path: + description: + - Path portion of the URL. + returned: success + type: str + service: + description: + - A reference to expected RegionBackendService resource the given URL should + be mapped to. 
+ returned: success + type: dict +defaultUrlRedirect: + description: + - When none of the specified hostRules match, the request is redirected to a URL + specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService + or defaultRouteAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the one that + was supplied in the request. The value must be between 1 and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain the + same as that of the request. This must only be set for UrlMaps used in TargetHttpProxys. + Setting this true for TargetHttpsProxy is not permitted. The default is set + to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the one that + was supplied in the request. pathRedirect cannot be supplied together with + prefixRedirect. Supply one alone or neither. If neither is supplied, the path + of the original request will be used for the redirect. The value must be between + 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply one alone + or neither. If neither is supplied, the path of the original request will + be used for the redirect. The value must be between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. 
Supported values are: + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to + 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL is removed + prior to redirecting the request. If set to false, the query portion of the + original URL is retained. + returned: success + type: bool +region: + description: + - A reference to the region where the url map resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + default_service=dict(type='dict'), + description=dict(type='str'), + host_rules=dict( + type='list', + elements='dict', + options=dict( + description=dict(type='str'), hosts=dict(required=True, type='list', elements='str'), path_matcher=dict(required=True, type='str') + ), + ), + name=dict(required=True, type='str'), + path_matchers=dict( + type='list', + elements='dict', + options=dict( + default_service=dict(type='dict'), + description=dict(type='str'), 
+ name=dict(required=True, type='str'), + route_rules=dict( + type='list', + elements='dict', + options=dict( + priority=dict(required=True, type='int'), + service=dict(type='dict'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + request_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + response_headers_to_remove=dict(type='list', elements='str'), + ), + ), + match_rules=dict( + type='list', + elements='dict', + options=dict( + full_path_match=dict(type='str'), + header_matches=dict( + type='list', + elements='dict', + options=dict( + exact_match=dict(type='str'), + header_name=dict(required=True, type='str'), + invert_match=dict(type='bool'), + prefix_match=dict(type='str'), + present_match=dict(type='bool'), + range_match=dict( + type='dict', + options=dict(range_end=dict(required=True, type='int'), range_start=dict(required=True, type='int')), + ), + regex_match=dict(type='str'), + suffix_match=dict(type='str'), + ), + ), + ignore_case=dict(type='bool'), + metadata_filters=dict( + type='list', + elements='dict', + options=dict( + filter_labels=dict( + required=True, + type='list', + elements='dict', + options=dict(name=dict(required=True, type='str'), value=dict(required=True, type='str')), + ), + filter_match_criteria=dict(required=True, type='str'), + ), + ), + prefix_match=dict(type='str'), + query_parameter_matches=dict( + type='list', + elements='dict', + options=dict( + exact_match=dict(type='str'), + name=dict(required=True, type='str'), + present_match=dict(type='bool'), + 
regex_match=dict(type='str'), + ), + ), + regex_match=dict(type='str'), + ), + ), + route_action=dict( + type='dict', + options=dict( + cors_policy=dict( + type='dict', + options=dict( + allow_credentials=dict(type='bool'), + allow_headers=dict(type='list', elements='str'), + allow_methods=dict(type='list', elements='str'), + allow_origin_regexes=dict(type='list', elements='str'), + allow_origins=dict(type='list', elements='str'), + disabled=dict(type='bool'), + expose_headers=dict(type='list', elements='str'), + max_age=dict(type='int'), + ), + ), + fault_injection_policy=dict( + type='dict', + options=dict( + abort=dict(type='dict', options=dict(http_status=dict(type='int'), percentage=dict(type='str'))), + delay=dict( + type='dict', + options=dict( + fixed_delay=dict( + type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str')) + ), + percentage=dict(type='str'), + ), + ), + ), + ), + request_mirror_policy=dict(type='dict', options=dict(backend_service=dict(required=True, type='dict'))), + retry_policy=dict( + type='dict', + options=dict( + num_retries=dict(required=True, type='int'), + per_try_timeout=dict(type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str'))), + retry_conditions=dict(type='list', elements='str'), + ), + ), + timeout=dict(type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str'))), + url_rewrite=dict(type='dict', options=dict(host_rewrite=dict(type='str'), path_prefix_rewrite=dict(type='str'))), + weighted_backend_services=dict( + type='list', + elements='dict', + options=dict( + backend_service=dict(required=True, type='dict'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + 
request_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + response_headers_to_remove=dict(type='list', elements='str'), + ), + ), + weight=dict(required=True, type='int'), + ), + ), + ), + ), + url_redirect=dict( + type='dict', + options=dict( + host_redirect=dict(type='str'), + https_redirect=dict(type='bool'), + path_redirect=dict(type='str'), + prefix_redirect=dict(type='str'), + redirect_response_code=dict(type='str'), + strip_query=dict(type='bool'), + ), + ), + ), + ), + path_rules=dict( + type='list', + elements='dict', + options=dict( + service=dict(type='dict'), + paths=dict(required=True, type='list', elements='str'), + route_action=dict( + type='dict', + options=dict( + cors_policy=dict( + type='dict', + options=dict( + allow_credentials=dict(type='bool'), + allow_headers=dict(type='list', elements='str'), + allow_methods=dict(type='list', elements='str'), + allow_origin_regexes=dict(type='list', elements='str'), + allow_origins=dict(type='list', elements='str'), + disabled=dict(required=True, type='bool'), + expose_headers=dict(type='list', elements='str'), + max_age=dict(type='int'), + ), + ), + fault_injection_policy=dict( + type='dict', + options=dict( + abort=dict( + type='dict', + options=dict(http_status=dict(required=True, type='int'), percentage=dict(required=True, type='str')), + ), + delay=dict( + type='dict', + options=dict( + fixed_delay=dict( + required=True, + type='dict', + options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str')), + ), + percentage=dict(required=True, type='str'), + ), + ), + ), + ), + request_mirror_policy=dict(type='dict', options=dict(backend_service=dict(required=True, type='dict'))), + retry_policy=dict( + type='dict', + options=dict( + 
num_retries=dict(type='int'), + per_try_timeout=dict(type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str'))), + retry_conditions=dict(type='list', elements='str'), + ), + ), + timeout=dict(type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str'))), + url_rewrite=dict(type='dict', options=dict(host_rewrite=dict(type='str'), path_prefix_rewrite=dict(type='str'))), + weighted_backend_services=dict( + type='list', + elements='dict', + options=dict( + backend_service=dict(required=True, type='dict'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + request_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + response_headers_to_remove=dict(type='list', elements='str'), + ), + ), + weight=dict(required=True, type='int'), + ), + ), + ), + ), + url_redirect=dict( + type='dict', + options=dict( + host_redirect=dict(type='str'), + https_redirect=dict(type='bool'), + path_redirect=dict(type='str'), + prefix_redirect=dict(type='str'), + redirect_response_code=dict(type='str'), + strip_query=dict(type='bool'), + ), + ), + ), + ), + default_url_redirect=dict( + type='dict', + options=dict( + host_redirect=dict(type='str'), + https_redirect=dict(type='bool'), + path_redirect=dict(type='str'), + prefix_redirect=dict(type='str'), + redirect_response_code=dict(type='str'), + strip_query=dict(type='bool'), + ), + ), + ), + ), + tests=dict( + type='list', + elements='dict', + options=dict( + description=dict(type='str'), + host=dict(required=True, 
def create(module, link, kind):
    """POST the UrlMap and block until the async operation completes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.post(link, resource_to_request(module)))


def update(module, link, kind):
    """PUT the full UrlMap representation and block until the operation completes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.put(link, resource_to_request(module)))


def delete(module, link, kind):
    """DELETE the UrlMap and block until the operation completes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.delete(link))


def resource_to_request(module):
    """Build the API request body from module params.

    Empty/None values are dropped, but an explicit False is kept so boolean
    fields can be cleared on the resource.
    """
    request = {
        u'kind': 'compute#urlMap',
        u'defaultService': replace_resource_dict(module.params.get(u'default_service', {}), 'selfLink'),
        u'description': module.params.get('description'),
        u'hostRules': RegionUrlMapHostrulesArray(module.params.get('host_rules', []), module).to_request(),
        u'name': module.params.get('name'),
        u'pathMatchers': RegionUrlMapPathmatchersArray(module.params.get('path_matchers', []), module).to_request(),
        u'tests': RegionUrlMapTestsArray(module.params.get('tests', []), module).to_request(),
        u'defaultUrlRedirect': RegionUrlMapDefaulturlredirect(module.params.get('default_url_redirect', {}), module).to_request(),
    }
    return {key: value for key, value in request.items() if value or value is False}


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource at `link`; returns None when a 404 is tolerated."""
    session = GcpSession(module, 'compute')
    return return_if_object(module, session.get(link), kind, allow_not_found)


def self_link(module):
    """URL of this specific regional UrlMap."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/urlMaps/{name}".format(**module.params)


def collection(module):
    """URL of the regional UrlMaps collection."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/urlMaps".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode a JSON API response, failing the module on HTTP or API errors.

    Returns None for an allowed 404 or an empty 204 response.
    """
    if allow_not_found and response.status_code == 404:
        return None
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    # Surface any API-level error payload as a module failure.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
def is_different(module, response):
    """Return True when the declared params differ from the live resource.

    Only keys present on both sides are compared; everything else is either
    output-only or unset by the user.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    response_vals = {key: value for key, value in response.items() if key in request}
    request_vals = {key: value for key, value in request.items() if key in response}

    return GcpRequest(request_vals) != GcpRequest(response_vals)


def response_to_hash(module, response):
    """Normalize an API response for comparison with the module's parameters."""
    return {
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'defaultService': response.get(u'defaultService'),
        u'description': response.get(u'description'),
        u'hostRules': RegionUrlMapHostrulesArray(response.get(u'hostRules', []), module).from_response(),
        u'id': response.get(u'id'),
        u'fingerprint': response.get(u'fingerprint'),
        u'name': module.params.get('name'),
        u'pathMatchers': RegionUrlMapPathmatchersArray(response.get(u'pathMatchers', []), module).from_response(),
        u'tests': RegionUrlMapTestsArray(response.get(u'tests', []), module).from_response(),
        u'defaultUrlRedirect': RegionUrlMapDefaulturlredirect(response.get(u'defaultUrlRedirect', {}), module).from_response(),
    }


def async_op_url(module, extra_data=None):
    """URL of the regional operation that tracks an asynchronous change.

    Keys in module.params take precedence over `extra_data` on conflict.
    """
    data = dict(extra_data or {})
    data.update(module.params)
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}".format(**data)


def wait_for_operation(module, response):
    """Resolve an operation response: wait until DONE, then fetch the target resource."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#urlMap')


def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until its status reaches DONE."""
    op_uri = async_op_url(module, {'op_id': navigate_hash(op_result, ['name'])})
    while status != 'DONE':
        # Abort immediately if the operation reports errors.
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module when the operation carries an error payload."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


class RegionUrlMapHostrulesArray(object):
    """Serializer for the UrlMap hostRules list (Ansible <-> GCP API)."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict(
            {u'description': entry.get('description'), u'hosts': entry.get('hosts'), u'pathMatcher': entry.get('path_matcher')}
        )

    def _response_from_item(self, entry):
        return remove_nones_from_dict(
            {u'description': entry.get(u'description'), u'hosts': entry.get(u'hosts'), u'pathMatcher': entry.get(u'pathMatcher')}
        )
class RegionUrlMapPathmatchersArray(object):
    """Serializer for the UrlMap pathMatchers list (Ansible <-> GCP API)."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict(
            {
                u'defaultService': replace_resource_dict(entry.get(u'default_service', {}), 'selfLink'),
                u'description': entry.get('description'),
                u'name': entry.get('name'),
                u'routeRules': RegionUrlMapRouterulesArray(entry.get('route_rules', []), self.module).to_request(),
                u'pathRules': RegionUrlMapPathrulesArray(entry.get('path_rules', []), self.module).to_request(),
                u'defaultUrlRedirect': RegionUrlMapDefaulturlredirect(entry.get('default_url_redirect', {}), self.module).to_request(),
            }
        )

    def _response_from_item(self, entry):
        return remove_nones_from_dict(
            {
                u'defaultService': entry.get(u'defaultService'),
                u'description': entry.get(u'description'),
                u'name': entry.get(u'name'),
                u'routeRules': RegionUrlMapRouterulesArray(entry.get(u'routeRules', []), self.module).from_response(),
                u'pathRules': RegionUrlMapPathrulesArray(entry.get(u'pathRules', []), self.module).from_response(),
                u'defaultUrlRedirect': RegionUrlMapDefaulturlredirect(entry.get(u'defaultUrlRedirect', {}), self.module).from_response(),
            }
        )


class RegionUrlMapRouterulesArray(object):
    """Serializer for pathMatchers[].routeRules entries."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict(
            {
                u'priority': entry.get('priority'),
                u'service': replace_resource_dict(entry.get(u'service', {}), 'selfLink'),
                u'headerAction': RegionUrlMapHeaderaction(entry.get('header_action', {}), self.module).to_request(),
                u'matchRules': RegionUrlMapMatchrulesArray(entry.get('match_rules', []), self.module).to_request(),
                u'routeAction': RegionUrlMapRouteaction(entry.get('route_action', {}), self.module).to_request(),
                u'urlRedirect': RegionUrlMapUrlredirect(entry.get('url_redirect', {}), self.module).to_request(),
            }
        )

    def _response_from_item(self, entry):
        return remove_nones_from_dict(
            {
                u'priority': entry.get(u'priority'),
                u'service': entry.get(u'service'),
                u'headerAction': RegionUrlMapHeaderaction(entry.get(u'headerAction', {}), self.module).from_response(),
                u'matchRules': RegionUrlMapMatchrulesArray(entry.get(u'matchRules', []), self.module).from_response(),
                u'routeAction': RegionUrlMapRouteaction(entry.get(u'routeAction', {}), self.module).from_response(),
                u'urlRedirect': RegionUrlMapUrlredirect(entry.get(u'urlRedirect', {}), self.module).from_response(),
            }
        )


class RegionUrlMapHeaderaction(object):
    """Serializer for a headerAction message (header add/remove rewrites)."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'requestHeadersToAdd': RegionUrlMapRequestheaderstoaddArray(self.request.get('request_headers_to_add', []), self.module).to_request(),
                u'requestHeadersToRemove': self.request.get('request_headers_to_remove'),
                u'responseHeadersToAdd': RegionUrlMapResponseheaderstoaddArray(self.request.get('response_headers_to_add', []), self.module).to_request(),
                u'responseHeadersToRemove': self.request.get('response_headers_to_remove'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'requestHeadersToAdd': RegionUrlMapRequestheaderstoaddArray(self.request.get(u'requestHeadersToAdd', []), self.module).from_response(),
                u'requestHeadersToRemove': self.request.get(u'requestHeadersToRemove'),
                u'responseHeadersToAdd': RegionUrlMapResponseheaderstoaddArray(self.request.get(u'responseHeadersToAdd', []), self.module).from_response(),
                u'responseHeadersToRemove': self.request.get(u'responseHeadersToRemove'),
            }
        )
class RegionUrlMapRequestheaderstoaddArray(object):
    """Serializer for headerAction.requestHeadersToAdd entries."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict(
            {u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')}
        )

    def _response_from_item(self, entry):
        return remove_nones_from_dict(
            {u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')}
        )


class RegionUrlMapResponseheaderstoaddArray(object):
    """Serializer for headerAction.responseHeadersToAdd entries."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict(
            {u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')}
        )

    def _response_from_item(self, entry):
        return remove_nones_from_dict(
            {u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')}
        )


class RegionUrlMapMatchrulesArray(object):
    """Serializer for routeRules[].matchRules entries."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict(
            {
                u'fullPathMatch': entry.get('full_path_match'),
                u'headerMatches': RegionUrlMapHeadermatchesArray(entry.get('header_matches', []), self.module).to_request(),
                u'ignoreCase': entry.get('ignore_case'),
                u'metadataFilters': RegionUrlMapMetadatafiltersArray(entry.get('metadata_filters', []), self.module).to_request(),
                u'prefixMatch': entry.get('prefix_match'),
                u'queryParameterMatches': RegionUrlMapQueryparametermatchesArray(entry.get('query_parameter_matches', []), self.module).to_request(),
                u'regexMatch': entry.get('regex_match'),
            }
        )

    def _response_from_item(self, entry):
        return remove_nones_from_dict(
            {
                u'fullPathMatch': entry.get(u'fullPathMatch'),
                u'headerMatches': RegionUrlMapHeadermatchesArray(entry.get(u'headerMatches', []), self.module).from_response(),
                u'ignoreCase': entry.get(u'ignoreCase'),
                u'metadataFilters': RegionUrlMapMetadatafiltersArray(entry.get(u'metadataFilters', []), self.module).from_response(),
                u'prefixMatch': entry.get(u'prefixMatch'),
                u'queryParameterMatches': RegionUrlMapQueryparametermatchesArray(entry.get(u'queryParameterMatches', []), self.module).from_response(),
                u'regexMatch': entry.get(u'regexMatch'),
            }
        )
class RegionUrlMapHeadermatchesArray(object):
    """Serializer for matchRules[].headerMatches entries."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict(
            {
                u'exactMatch': entry.get('exact_match'),
                u'headerName': entry.get('header_name'),
                u'invertMatch': entry.get('invert_match'),
                u'prefixMatch': entry.get('prefix_match'),
                u'presentMatch': entry.get('present_match'),
                u'rangeMatch': RegionUrlMapRangematch(entry.get('range_match', {}), self.module).to_request(),
                u'regexMatch': entry.get('regex_match'),
                u'suffixMatch': entry.get('suffix_match'),
            }
        )

    def _response_from_item(self, entry):
        return remove_nones_from_dict(
            {
                u'exactMatch': entry.get(u'exactMatch'),
                u'headerName': entry.get(u'headerName'),
                u'invertMatch': entry.get(u'invertMatch'),
                u'prefixMatch': entry.get(u'prefixMatch'),
                u'presentMatch': entry.get(u'presentMatch'),
                u'rangeMatch': RegionUrlMapRangematch(entry.get(u'rangeMatch', {}), self.module).from_response(),
                u'regexMatch': entry.get(u'regexMatch'),
                u'suffixMatch': entry.get(u'suffixMatch'),
            }
        )


class RegionUrlMapRangematch(object):
    """Serializer for a headerMatches rangeMatch message."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({u'rangeEnd': self.request.get('range_end'), u'rangeStart': self.request.get('range_start')})

    def from_response(self):
        return remove_nones_from_dict({u'rangeEnd': self.request.get(u'rangeEnd'), u'rangeStart': self.request.get(u'rangeStart')})


class RegionUrlMapMetadatafiltersArray(object):
    """Serializer for matchRules[].metadataFilters entries."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict(
            {
                u'filterLabels': RegionUrlMapFilterlabelsArray(entry.get('filter_labels', []), self.module).to_request(),
                u'filterMatchCriteria': entry.get('filter_match_criteria'),
            }
        )

    def _response_from_item(self, entry):
        return remove_nones_from_dict(
            {
                u'filterLabels': RegionUrlMapFilterlabelsArray(entry.get(u'filterLabels', []), self.module).from_response(),
                u'filterMatchCriteria': entry.get(u'filterMatchCriteria'),
            }
        )


class RegionUrlMapFilterlabelsArray(object):
    """Serializer for metadataFilters[].filterLabels entries."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'name': entry.get('name'), u'value': entry.get('value')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'name': entry.get(u'name'), u'value': entry.get(u'value')})


class RegionUrlMapQueryparametermatchesArray(object):
    """Serializer for matchRules[].queryParameterMatches entries."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict(
            {
                u'exactMatch': entry.get('exact_match'),
                u'name': entry.get('name'),
                u'presentMatch': entry.get('present_match'),
                u'regexMatch': entry.get('regex_match'),
            }
        )

    def _response_from_item(self, entry):
        return remove_nones_from_dict(
            {
                u'exactMatch': entry.get(u'exactMatch'),
                u'name': entry.get(u'name'),
                u'presentMatch': entry.get(u'presentMatch'),
                u'regexMatch': entry.get(u'regexMatch'),
            }
        )
class RegionUrlMapRouteaction(object):
    """Serializer for a routeAction message."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'corsPolicy': RegionUrlMapCorspolicy(self.request.get('cors_policy', {}), self.module).to_request(),
                u'faultInjectionPolicy': RegionUrlMapFaultinjectionpolicy(self.request.get('fault_injection_policy', {}), self.module).to_request(),
                u'requestMirrorPolicy': RegionUrlMapRequestmirrorpolicy(self.request.get('request_mirror_policy', {}), self.module).to_request(),
                u'retryPolicy': RegionUrlMapRetrypolicy(self.request.get('retry_policy', {}), self.module).to_request(),
                u'timeout': RegionUrlMapTimeout(self.request.get('timeout', {}), self.module).to_request(),
                u'urlRewrite': RegionUrlMapUrlrewrite(self.request.get('url_rewrite', {}), self.module).to_request(),
                u'weightedBackendServices': RegionUrlMapWeightedbackendservicesArray(
                    self.request.get('weighted_backend_services', []), self.module
                ).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'corsPolicy': RegionUrlMapCorspolicy(self.request.get(u'corsPolicy', {}), self.module).from_response(),
                u'faultInjectionPolicy': RegionUrlMapFaultinjectionpolicy(self.request.get(u'faultInjectionPolicy', {}), self.module).from_response(),
                u'requestMirrorPolicy': RegionUrlMapRequestmirrorpolicy(self.request.get(u'requestMirrorPolicy', {}), self.module).from_response(),
                u'retryPolicy': RegionUrlMapRetrypolicy(self.request.get(u'retryPolicy', {}), self.module).from_response(),
                u'timeout': RegionUrlMapTimeout(self.request.get(u'timeout', {}), self.module).from_response(),
                u'urlRewrite': RegionUrlMapUrlrewrite(self.request.get(u'urlRewrite', {}), self.module).from_response(),
                u'weightedBackendServices': RegionUrlMapWeightedbackendservicesArray(
                    self.request.get(u'weightedBackendServices', []), self.module
                ).from_response(),
            }
        )


class RegionUrlMapCorspolicy(object):
    """Serializer for a routeAction corsPolicy message."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'allowCredentials': self.request.get('allow_credentials'),
                u'allowHeaders': self.request.get('allow_headers'),
                u'allowMethods': self.request.get('allow_methods'),
                u'allowOriginRegexes': self.request.get('allow_origin_regexes'),
                u'allowOrigins': self.request.get('allow_origins'),
                u'disabled': self.request.get('disabled'),
                u'exposeHeaders': self.request.get('expose_headers'),
                u'maxAge': self.request.get('max_age'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'allowCredentials': self.request.get(u'allowCredentials'),
                u'allowHeaders': self.request.get(u'allowHeaders'),
                u'allowMethods': self.request.get(u'allowMethods'),
                u'allowOriginRegexes': self.request.get(u'allowOriginRegexes'),
                u'allowOrigins': self.request.get(u'allowOrigins'),
                u'disabled': self.request.get(u'disabled'),
                u'exposeHeaders': self.request.get(u'exposeHeaders'),
                u'maxAge': self.request.get(u'maxAge'),
            }
        )


class RegionUrlMapFaultinjectionpolicy(object):
    """Serializer for a routeAction faultInjectionPolicy message."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'abort': RegionUrlMapAbort(self.request.get('abort', {}), self.module).to_request(),
                u'delay': RegionUrlMapDelay(self.request.get('delay', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'abort': RegionUrlMapAbort(self.request.get(u'abort', {}), self.module).from_response(),
                u'delay': RegionUrlMapDelay(self.request.get(u'delay', {}), self.module).from_response(),
            }
        )


class RegionUrlMapAbort(object):
    """Serializer for a faultInjectionPolicy abort message."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({u'httpStatus': self.request.get('http_status'), u'percentage': self.request.get('percentage')})

    def from_response(self):
        return remove_nones_from_dict({u'httpStatus': self.request.get(u'httpStatus'), u'percentage': self.request.get(u'percentage')})
u'fixedDelay': RegionUrlMapFixeddelay(self.request.get('fixed_delay', {}), self.module).to_request(), + u'percentage': self.request.get('percentage'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'fixedDelay': RegionUrlMapFixeddelay(self.request.get(u'fixedDelay', {}), self.module).from_response(), + u'percentage': self.request.get(u'percentage'), + } + ) + + +class RegionUrlMapFixeddelay(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'nanos': self.request.get('nanos'), u'seconds': self.request.get('seconds')}) + + def from_response(self): + return remove_nones_from_dict({u'nanos': self.request.get(u'nanos'), u'seconds': self.request.get(u'seconds')}) + + +class RegionUrlMapRequestmirrorpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'backendService': replace_resource_dict(self.request.get(u'backend_service', {}), 'selfLink')}) + + def from_response(self): + return remove_nones_from_dict({u'backendService': self.request.get(u'backendService')}) + + +class RegionUrlMapRetrypolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'numRetries': self.request.get('num_retries'), + u'perTryTimeout': RegionUrlMapPertrytimeout(self.request.get('per_try_timeout', {}), self.module).to_request(), + u'retryConditions': self.request.get('retry_conditions'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'numRetries': self.request.get(u'numRetries'), + u'perTryTimeout': RegionUrlMapPertrytimeout(self.request.get(u'perTryTimeout', {}), self.module).from_response(), 
+ u'retryConditions': self.request.get(u'retryConditions'), + } + ) + + +class RegionUrlMapPertrytimeout(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'nanos': self.request.get('nanos'), u'seconds': self.request.get('seconds')}) + + def from_response(self): + return remove_nones_from_dict({u'nanos': self.request.get(u'nanos'), u'seconds': self.request.get(u'seconds')}) + + +class RegionUrlMapTimeout(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'nanos': self.request.get('nanos'), u'seconds': self.request.get('seconds')}) + + def from_response(self): + return remove_nones_from_dict({u'nanos': self.request.get(u'nanos'), u'seconds': self.request.get(u'seconds')}) + + +class RegionUrlMapUrlrewrite(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'hostRewrite': self.request.get('host_rewrite'), u'pathPrefixRewrite': self.request.get('path_prefix_rewrite')}) + + def from_response(self): + return remove_nones_from_dict({u'hostRewrite': self.request.get(u'hostRewrite'), u'pathPrefixRewrite': self.request.get(u'pathPrefixRewrite')}) + + +class RegionUrlMapWeightedbackendservicesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return 
remove_nones_from_dict( + { + u'backendService': replace_resource_dict(item.get(u'backend_service', {}), 'selfLink'), + u'headerAction': RegionUrlMapHeaderaction(item.get('header_action', {}), self.module).to_request(), + u'weight': item.get('weight'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'backendService': item.get(u'backendService'), + u'headerAction': RegionUrlMapHeaderaction(item.get(u'headerAction', {}), self.module).from_response(), + u'weight': item.get(u'weight'), + } + ) + + +class RegionUrlMapHeaderaction(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'requestHeadersToAdd': RegionUrlMapRequestheaderstoaddArray(self.request.get('request_headers_to_add', []), self.module).to_request(), + u'requestHeadersToRemove': self.request.get('request_headers_to_remove'), + u'responseHeadersToAdd': RegionUrlMapResponseheaderstoaddArray(self.request.get('response_headers_to_add', []), self.module).to_request(), + u'responseHeadersToRemove': self.request.get('response_headers_to_remove'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'requestHeadersToAdd': RegionUrlMapRequestheaderstoaddArray(self.request.get(u'requestHeadersToAdd', []), self.module).from_response(), + u'requestHeadersToRemove': self.request.get(u'requestHeadersToRemove'), + u'responseHeadersToAdd': RegionUrlMapResponseheaderstoaddArray(self.request.get(u'responseHeadersToAdd', []), self.module).from_response(), + u'responseHeadersToRemove': self.request.get(u'responseHeadersToRemove'), + } + ) + + +class RegionUrlMapRequestheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + 
items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class RegionUrlMapResponseheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class RegionUrlMapUrlredirect(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get('host_redirect'), + u'httpsRedirect': self.request.get('https_redirect'), + u'pathRedirect': self.request.get('path_redirect'), + u'prefixRedirect': self.request.get('prefix_redirect'), + u'redirectResponseCode': self.request.get('redirect_response_code'), + u'stripQuery': self.request.get('strip_query'), + } + ) + + def 
from_response(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get(u'hostRedirect'), + u'httpsRedirect': self.request.get(u'httpsRedirect'), + u'pathRedirect': self.request.get(u'pathRedirect'), + u'prefixRedirect': self.request.get(u'prefixRedirect'), + u'redirectResponseCode': self.request.get(u'redirectResponseCode'), + u'stripQuery': self.request.get(u'stripQuery'), + } + ) + + +class RegionUrlMapPathrulesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'service': replace_resource_dict(item.get(u'service', {}), 'selfLink'), + u'paths': item.get('paths'), + u'routeAction': RegionUrlMapRouteaction(item.get('route_action', {}), self.module).to_request(), + u'urlRedirect': RegionUrlMapUrlredirect(item.get('url_redirect', {}), self.module).to_request(), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'service': item.get(u'service'), + u'paths': item.get(u'paths'), + u'routeAction': RegionUrlMapRouteaction(item.get(u'routeAction', {}), self.module).from_response(), + u'urlRedirect': RegionUrlMapUrlredirect(item.get(u'urlRedirect', {}), self.module).from_response(), + } + ) + + +class RegionUrlMapRouteaction(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'corsPolicy': RegionUrlMapCorspolicy(self.request.get('cors_policy', {}), self.module).to_request(), + u'faultInjectionPolicy': 
RegionUrlMapFaultinjectionpolicy(self.request.get('fault_injection_policy', {}), self.module).to_request(), + u'requestMirrorPolicy': RegionUrlMapRequestmirrorpolicy(self.request.get('request_mirror_policy', {}), self.module).to_request(), + u'retryPolicy': RegionUrlMapRetrypolicy(self.request.get('retry_policy', {}), self.module).to_request(), + u'timeout': RegionUrlMapTimeout(self.request.get('timeout', {}), self.module).to_request(), + u'urlRewrite': RegionUrlMapUrlrewrite(self.request.get('url_rewrite', {}), self.module).to_request(), + u'weightedBackendServices': RegionUrlMapWeightedbackendservicesArray( + self.request.get('weighted_backend_services', []), self.module + ).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'corsPolicy': RegionUrlMapCorspolicy(self.request.get(u'corsPolicy', {}), self.module).from_response(), + u'faultInjectionPolicy': RegionUrlMapFaultinjectionpolicy(self.request.get(u'faultInjectionPolicy', {}), self.module).from_response(), + u'requestMirrorPolicy': RegionUrlMapRequestmirrorpolicy(self.request.get(u'requestMirrorPolicy', {}), self.module).from_response(), + u'retryPolicy': RegionUrlMapRetrypolicy(self.request.get(u'retryPolicy', {}), self.module).from_response(), + u'timeout': RegionUrlMapTimeout(self.request.get(u'timeout', {}), self.module).from_response(), + u'urlRewrite': RegionUrlMapUrlrewrite(self.request.get(u'urlRewrite', {}), self.module).from_response(), + u'weightedBackendServices': RegionUrlMapWeightedbackendservicesArray( + self.request.get(u'weightedBackendServices', []), self.module + ).from_response(), + } + ) + + +class RegionUrlMapCorspolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'allowCredentials': self.request.get('allow_credentials'), + u'allowHeaders': self.request.get('allow_headers'), + 
u'allowMethods': self.request.get('allow_methods'), + u'allowOriginRegexes': self.request.get('allow_origin_regexes'), + u'allowOrigins': self.request.get('allow_origins'), + u'disabled': self.request.get('disabled'), + u'exposeHeaders': self.request.get('expose_headers'), + u'maxAge': self.request.get('max_age'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'allowCredentials': self.request.get(u'allowCredentials'), + u'allowHeaders': self.request.get(u'allowHeaders'), + u'allowMethods': self.request.get(u'allowMethods'), + u'allowOriginRegexes': self.request.get(u'allowOriginRegexes'), + u'allowOrigins': self.request.get(u'allowOrigins'), + u'disabled': self.request.get(u'disabled'), + u'exposeHeaders': self.request.get(u'exposeHeaders'), + u'maxAge': self.request.get(u'maxAge'), + } + ) + + +class RegionUrlMapFaultinjectionpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'abort': RegionUrlMapAbort(self.request.get('abort', {}), self.module).to_request(), + u'delay': RegionUrlMapDelay(self.request.get('delay', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'abort': RegionUrlMapAbort(self.request.get(u'abort', {}), self.module).from_response(), + u'delay': RegionUrlMapDelay(self.request.get(u'delay', {}), self.module).from_response(), + } + ) + + +class RegionUrlMapAbort(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'httpStatus': self.request.get('http_status'), u'percentage': self.request.get('percentage')}) + + def from_response(self): + return remove_nones_from_dict({u'httpStatus': self.request.get(u'httpStatus'), u'percentage': self.request.get(u'percentage')}) 
+ + +class RegionUrlMapDelay(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'fixedDelay': RegionUrlMapFixeddelay(self.request.get('fixed_delay', {}), self.module).to_request(), + u'percentage': self.request.get('percentage'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'fixedDelay': RegionUrlMapFixeddelay(self.request.get(u'fixedDelay', {}), self.module).from_response(), + u'percentage': self.request.get(u'percentage'), + } + ) + + +class RegionUrlMapFixeddelay(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'nanos': self.request.get('nanos'), u'seconds': self.request.get('seconds')}) + + def from_response(self): + return remove_nones_from_dict({u'nanos': self.request.get(u'nanos'), u'seconds': self.request.get(u'seconds')}) + + +class RegionUrlMapRequestmirrorpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'backendService': replace_resource_dict(self.request.get(u'backend_service', {}), 'selfLink')}) + + def from_response(self): + return remove_nones_from_dict({u'backendService': self.request.get(u'backendService')}) + + +class RegionUrlMapRetrypolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'numRetries': self.request.get('num_retries'), + u'perTryTimeout': RegionUrlMapPertrytimeout(self.request.get('per_try_timeout', {}), self.module).to_request(), + u'retryConditions': self.request.get('retry_conditions'), + } + ) 
+ + def from_response(self): + return remove_nones_from_dict( + { + u'numRetries': self.request.get(u'numRetries'), + u'perTryTimeout': RegionUrlMapPertrytimeout(self.request.get(u'perTryTimeout', {}), self.module).from_response(), + u'retryConditions': self.request.get(u'retryConditions'), + } + ) + + +class RegionUrlMapPertrytimeout(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'nanos': self.request.get('nanos'), u'seconds': self.request.get('seconds')}) + + def from_response(self): + return remove_nones_from_dict({u'nanos': self.request.get(u'nanos'), u'seconds': self.request.get(u'seconds')}) + + +class RegionUrlMapTimeout(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'nanos': self.request.get('nanos'), u'seconds': self.request.get('seconds')}) + + def from_response(self): + return remove_nones_from_dict({u'nanos': self.request.get(u'nanos'), u'seconds': self.request.get(u'seconds')}) + + +class RegionUrlMapUrlrewrite(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'hostRewrite': self.request.get('host_rewrite'), u'pathPrefixRewrite': self.request.get('path_prefix_rewrite')}) + + def from_response(self): + return remove_nones_from_dict({u'hostRewrite': self.request.get(u'hostRewrite'), u'pathPrefixRewrite': self.request.get(u'pathPrefixRewrite')}) + + +class RegionUrlMapWeightedbackendservicesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + 
items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'backendService': replace_resource_dict(item.get(u'backend_service', {}), 'selfLink'), + u'headerAction': RegionUrlMapHeaderaction(item.get('header_action', {}), self.module).to_request(), + u'weight': item.get('weight'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'backendService': item.get(u'backendService'), + u'headerAction': RegionUrlMapHeaderaction(item.get(u'headerAction', {}), self.module).from_response(), + u'weight': item.get(u'weight'), + } + ) + + +class RegionUrlMapHeaderaction(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'requestHeadersToAdd': RegionUrlMapRequestheaderstoaddArray(self.request.get('request_headers_to_add', []), self.module).to_request(), + u'requestHeadersToRemove': self.request.get('request_headers_to_remove'), + u'responseHeadersToAdd': RegionUrlMapResponseheaderstoaddArray(self.request.get('response_headers_to_add', []), self.module).to_request(), + u'responseHeadersToRemove': self.request.get('response_headers_to_remove'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'requestHeadersToAdd': RegionUrlMapRequestheaderstoaddArray(self.request.get(u'requestHeadersToAdd', []), self.module).from_response(), + u'requestHeadersToRemove': self.request.get(u'requestHeadersToRemove'), + u'responseHeadersToAdd': RegionUrlMapResponseheaderstoaddArray(self.request.get(u'responseHeadersToAdd', []), self.module).from_response(), + u'responseHeadersToRemove': self.request.get(u'responseHeadersToRemove'), + } + ) + + +class 
RegionUrlMapRequestheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class RegionUrlMapResponseheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class RegionUrlMapUrlredirect(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get('host_redirect'), + u'httpsRedirect': self.request.get('https_redirect'), + 
u'pathRedirect': self.request.get('path_redirect'), + u'prefixRedirect': self.request.get('prefix_redirect'), + u'redirectResponseCode': self.request.get('redirect_response_code'), + u'stripQuery': self.request.get('strip_query'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get(u'hostRedirect'), + u'httpsRedirect': self.request.get(u'httpsRedirect'), + u'pathRedirect': self.request.get(u'pathRedirect'), + u'prefixRedirect': self.request.get(u'prefixRedirect'), + u'redirectResponseCode': self.request.get(u'redirectResponseCode'), + u'stripQuery': self.request.get(u'stripQuery'), + } + ) + + +class RegionUrlMapDefaulturlredirect(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get('host_redirect'), + u'httpsRedirect': self.request.get('https_redirect'), + u'pathRedirect': self.request.get('path_redirect'), + u'prefixRedirect': self.request.get('prefix_redirect'), + u'redirectResponseCode': self.request.get('redirect_response_code'), + u'stripQuery': self.request.get('strip_query'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get(u'hostRedirect'), + u'httpsRedirect': self.request.get(u'httpsRedirect'), + u'pathRedirect': self.request.get(u'pathRedirect'), + u'prefixRedirect': self.request.get(u'prefixRedirect'), + u'redirectResponseCode': self.request.get(u'redirectResponseCode'), + u'stripQuery': self.request.get(u'stripQuery'), + } + ) + + +class RegionUrlMapTestsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + 
items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'description': item.get('description'), + u'host': item.get('host'), + u'path': item.get('path'), + u'service': replace_resource_dict(item.get(u'service', {}), 'selfLink'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + {u'description': item.get(u'description'), u'host': item.get(u'host'), u'path': item.get(u'path'), u'service': item.get(u'service')} + ) + + +class RegionUrlMapDefaulturlredirect(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get('host_redirect'), + u'httpsRedirect': self.request.get('https_redirect'), + u'pathRedirect': self.request.get('path_redirect'), + u'prefixRedirect': self.request.get('prefix_redirect'), + u'redirectResponseCode': self.request.get('redirect_response_code'), + u'stripQuery': self.request.get('strip_query'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get(u'hostRedirect'), + u'httpsRedirect': self.request.get(u'httpsRedirect'), + u'pathRedirect': self.request.get(u'pathRedirect'), + u'prefixRedirect': self.request.get(u'prefixRedirect'), + u'redirectResponseCode': self.request.get(u'redirectResponseCode'), + u'stripQuery': self.request.get(u'stripQuery'), + } + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_url_map_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_url_map_info.py new file mode 100644 index 000000000..ae5f174ca --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_region_url_map_info.py @@ -0,0 +1,1658 @@ +#!/usr/bin/python 
+# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_region_url_map_info +description: +- Gather info for GCP RegionUrlMap +short_description: Gather info for GCP RegionUrlMap +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - A reference to the region where the url map resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a region URL map + gcp_compute_region_url_map_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + defaultService: + description: + - The full or partial URL of the defaultService resource to which traffic is + directed if none of the hostRules match. If defaultRouteAction is additionally + specified, advanced routing actions like URL Rewrites, etc. take effect prior + to sending the request to the backend. However, if defaultService is specified, + defaultRouteAction cannot contain any weightedBackendServices. Conversely, + if routeAction specifies any weightedBackendServices, service must not be + specified. Only one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService + must be set. + returned: success + type: dict + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + hostRules: + description: + - The list of HostRules to use against the URL. + returned: success + type: complex + contains: + description: + description: + - An optional description of this HostRule. Provide this property when you + create the resource. + returned: success + type: str + hosts: + description: + - The list of host patterns to match. They must be valid hostnames, except + * will match any string of ([a-z0-9-.]*). In that case, * must be the + first character and must be followed in the pattern by either - or . 
+ returned: success + type: list + pathMatcher: + description: + - The name of the PathMatcher to use to match the path portion of the URL + if the hostRule matches the URL's host portion. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + fingerprint: + description: + - Fingerprint of this resource. This field is used internally during updates + of this resource. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + pathMatchers: + description: + - The list of named PathMatchers to use against the URL. + returned: success + type: complex + contains: + defaultService: + description: + - A reference to a RegionBackendService resource. This will be used if none + of the pathRules defined by this PathMatcher is matched by the URL's path + portion. + returned: success + type: dict + description: + description: + - An optional description of this resource. + returned: success + type: str + name: + description: + - The name to which this PathMatcher is referred by the HostRule. + returned: success + type: str + routeRules: + description: + - 'The list of ordered HTTP route rules. Use this list instead of pathRules + when advanced route matching and routing actions are desired. The order + of specifying routeRules matters: the first rule that matches will cause + its specified routing action to take effect. Within a given pathMatcher, + only one of pathRules or routeRules must be set. 
routeRules are not supported
+            in UrlMaps intended for External load balancers.'
+          returned: success
+          type: complex
+          contains:
+            priority:
+              description:
+              - For routeRules within a given pathMatcher, priority determines the
+                order in which load balancer will interpret routeRules. RouteRules
+                are evaluated in order of priority, from the lowest to highest number.
+                The priority of a rule decreases as its number increases (1, 2, 3,
+                N+1). The first rule that matches the request is applied.
+              - You cannot configure two or more routeRules with the same priority.
+              - Priority for each rule must be set to a number between 0 and 2147483647
+                inclusive.
+              - Priority numbers can have gaps, which enable you to add or remove
+                rules in the future without affecting the rest of the rules. For example,
+                1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to
+                which you could add rules numbered from 6 to 8, 10 to 11, and 13 to
+                15 in the future without any impact on existing rules.
+              returned: success
+              type: int
+            service:
+              description:
+              - The region backend service resource to which traffic is directed if
+                this rule is matched. If routeAction is additionally specified, advanced
+                routing actions like URL Rewrites, etc. take effect prior to sending
+                the request to the backend. However, if service is specified, routeAction
+                cannot contain any weightedBackendServices. Conversely, if routeAction
+                specifies any weightedBackendServices, service must not be specified.
+                Only one of urlRedirect, service or routeAction.weightedBackendService
+                must be set.
+              returned: success
+              type: dict
+            headerAction:
+              description:
+              - Specifies changes to request and response headers that need to take
+                effect for the selected backendService. The headerAction specified
+                here are applied before the matching pathMatchers[].headerAction and
+                after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction
+                .
+ returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the + header, discarding any values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back + to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the + header, discarding any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the response prior to sending the response back to the client. + returned: success + type: list + matchRules: + description: + - The rules for determining a match. 
+ returned: success + type: complex + contains: + fullPathMatch: + description: + - For satisfying the matchRule condition, the path of the request + must exactly match the value specified in fullPathMatch after + removing any query parameters and anchor that may be part of the + original URL. FullPathMatch must be between 1 and 1024 characters. + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. + returned: success + type: str + headerMatches: + description: + - Specifies a list of header match criteria, all of which must match + corresponding headers in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The value should exactly match contents of exactMatch. Only + one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch + or rangeMatch must be set. + returned: success + type: str + headerName: + description: + - The name of the HTTP header to match. For matching against + the HTTP request's authority, use a headerMatch with the header + name ":authority". For matching a request's method, use the + headerName ":method". + returned: success + type: str + invertMatch: + description: + - If set to false, the headerMatch is considered a match if + the match criteria above are met. If set to true, the headerMatch + is considered a match if the match criteria above are NOT + met. Defaults to false. + returned: success + type: bool + prefixMatch: + description: + - The value of the header must start with the contents of prefixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + presentMatch: + description: + - A header with the contents of headerName must exist. The match + takes place whether or not the request's header has a value + or not. Only one of exactMatch, prefixMatch, suffixMatch, + regexMatch, presentMatch or rangeMatch must be set. 
+ returned: success + type: bool + rangeMatch: + description: + - The header value must be an integer and its value must be + in the range specified in rangeMatch. If the header does not + contain an integer, number or is empty, the match fails. For + example for a range [-5, 0] * -3 will match * 0 will not match + * 0.25 will not match * -3someString will not match. + - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: complex + contains: + rangeEnd: + description: + - The end of the range (exclusive). + returned: success + type: int + rangeStart: + description: + - The start of the range (inclusive). + returned: success + type: int + regexMatch: + description: + - 'The value of the header must match the regular expression + specified in regexMatch. For regular expression grammar, please + see: en.cppreference.com/w/cpp/regex/ecmascript For matching + against a port specified in the HTTP request, use a headerMatch + with headerName set to PORT and a regular expression that + satisfies the RFC2616 Host header''s port specifier.' + - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + suffixMatch: + description: + - The value of the header must end with the contents of suffixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + ignoreCase: + description: + - Specifies that prefixMatch and fullPathMatch matches are case + sensitive. + - Defaults to false. + returned: success + type: bool + metadataFilters: + description: + - Opaque filter criteria used by Loadbalancer to restrict routing + configuration to a limited set xDS compliant clients. In their + xDS requests to Loadbalancer, xDS clients present node metadata. 
+                    If a match takes place, the relevant routing configuration is
+                    made available to those proxies. For each metadataFilter in this
+                    list, if its filterMatchCriteria is set to MATCH_ANY, at least
+                    one of the filterLabels must match the corresponding label provided
+                    in the metadata. If its filterMatchCriteria is set to MATCH_ALL,
+                    then all of its filterLabels must match with corresponding labels
+                    in the provided metadata. metadataFilters specified here can
+                    override those specified in ForwardingRule that refers to this
+                    UrlMap. metadataFilters only applies to Loadbalancers that have
+                    their loadBalancingScheme set to INTERNAL_SELF_MANAGED.
+                  returned: success
+                  type: complex
+                  contains:
+                    filterLabels:
+                      description:
+                      - The list of label value pairs that must match labels in the
+                        provided metadata based on filterMatchCriteria This list must
+                        not be empty and can have at the most 64 entries.
+                      returned: success
+                      type: complex
+                      contains:
+                        name:
+                          description:
+                          - Name of metadata label. The name can have a maximum length
+                            of 1024 characters and must be at least 1 character long.
+                          returned: success
+                          type: str
+                        value:
+                          description:
+                          - The value of the label must match the specified value.
+                            value can have a maximum length of 1024 characters.
+                          returned: success
+                          type: str
+                    filterMatchCriteria:
+                      description:
+                      - 'Specifies how individual filterLabel matches within the list
+                        of filterLabels contribute towards the overall metadataFilter
+                        match. Supported values are: * MATCH_ANY: At least one of
+                        the filterLabels must have a matching label in the provided
+                        metadata.'
+                      - "* MATCH_ALL: All filterLabels must have matching labels in
+                        the provided metadata."
+                      returned: success
+                      type: str
+                prefixMatch:
+                  description:
+                  - For satisfying the matchRule condition, the request's path must
+                    begin with the specified prefixMatch. prefixMatch must begin with
+                    a /. The value must be between 1 and 1024 characters.
Only one + of prefixMatch, fullPathMatch or regexMatch must be specified. + returned: success + type: str + queryParameterMatches: + description: + - Specifies a list of query parameter match criteria, all of which + must match corresponding query parameters in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The queryParameterMatch matches if the value of the parameter + exactly matches the contents of exactMatch. Only one of presentMatch, + exactMatch and regexMatch must be set. + returned: success + type: str + name: + description: + - The name of the query parameter to match. The query parameter + must exist in the request, in the absence of which the request + match fails. + returned: success + type: str + presentMatch: + description: + - Specifies that the queryParameterMatch matches if the request + contains the query parameter, irrespective of whether the + parameter has a value or not. Only one of presentMatch, exactMatch + and regexMatch must be set. + returned: success + type: bool + regexMatch: + description: + - The queryParameterMatch matches if the value of the parameter + matches the regular expression specified by regexMatch. For + the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of presentMatch, exactMatch and regexMatch must be + set. + returned: success + type: str + regexMatch: + description: + - For satisfying the matchRule condition, the path of the request + must satisfy the regular expression specified in regexMatch after + removing any query parameters and anchor supplied with the original + URL. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. 
+ returned: success + type: str + routeAction: + description: + - In response to a matching matchRule, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction + specifies any weightedBackendServices, service must not be set. Conversely + if service is set, routeAction cannot contain any weightedBackendServices. + Only one of routeAction or urlRedirect must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing + . + returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This + translates to the Access- Control-Allow-Credentials header. + Defaults to false. + returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers + header. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods + header. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed + origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or + allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + - which indicates that the CORS policy is in effect. 
Defaults + to false. + returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can + be cached. This translates to the content for the Access-Control-Max-Age + header. + returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic + to test the resiliency of clients to backend service failure. + As part of fault injection, when clients send requests to a backend + service, delays can be introduced by Loadbalancer on a percentage + of requests before sending those request to the backend service. + Similarly requests from clients can be aborted by the Loadbalancer + for a percentage of requests. timeout and retry_policy will be + ignored by clients that are configured with a fault_injection_policy. + returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The + value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. 
+ returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be + from 0 to 315,576,000,000 inclusive. + returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's + backends are shadowed to a separate mirrored backend service. + Loadbalancer does not wait for responses from the shadow service. + Prior to sending traffic to the shadow service, the host / authority + header is suffixed with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The RegionBackendService resource being mirrored to. + returned: success + type: dict + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + numRetries: + description: + - Specifies the allowed number retries. This number must be + > 0. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. 
Must be from + 0 to 315,576,000,000 inclusive. + returned: success + type: str + retryConditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: * 5xx: Loadbalancer will attempt a retry + if the backend service responds with any 5xx response code, + or if the backend service does not respond at all, example: + disconnects, reset, read timeout, connection failure, and + refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx + response codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend + service resets the stream with a REFUSED_STREAM error code. + This reset type indicates that it is safe to retry." + - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header + is set to resource-exhausted * unavailable: Loadbalancer will + retry if the gRPC status code in the response header is set + to unavailable ." + returned: success + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request is has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. 
+ Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + returned: success + type: str + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding + the request to the matched service . + returned: success + type: complex + contains: + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the + request's host header is replaced with contents of hostRewrite. + The value must be between 1 and 255 characters. + returned: success + type: str + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by + pathPrefixRewrite. The value must be between 1 and 1024 characters. + returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a + route match occurs. The weights determine the fraction of traffic + that flows to their corresponding backend service. If all traffic + needs to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default RegionBackendService resource. Before forwarding + the request to backendService, the loadbalancer applies any + relevant headerActions specified as part of this backendServiceWeight. 
+ returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the + backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to + the client. 
+ returned: success + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, + computed as weight / (sum of all weightedBackendService weights + in routeAction) . The selection of a backend service is determined + only for new traffic. Once a user's request has been directed + to a backendService, subsequent requests will be sent to the + same backendService as determined by the BackendService's + session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + urlRedirect: + description: + - When this rule is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction + must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of + the one that was supplied in the request. The value must be between + 1 and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. + - If set to false, the URL scheme of the redirected request will + remain the same as that of the request. This must only be set + for UrlMaps used in TargetHttpProxys. Setting this true for TargetHttpsProxy + is not permitted. The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of + the one that was supplied in the request. pathRedirect cannot + be supplied together with prefixRedirect. Supply one alone or + neither. If neither is supplied, the path of the original request + will be used for the redirect. + - The value must be between 1 and 1024 characters. 
+ returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting + the request. prefixRedirect cannot be supplied together with pathRedirect. + Supply one alone or neither. If neither is supplied, the path + of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported + values are: * MOVED_PERMANENTLY_DEFAULT, which is the default + value and corresponds to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, + the request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original + URL is removed prior to redirecting the request. If set to false, + the query portion of the original URL is retained. The default + value is false. + returned: success + type: bool + pathRules: + description: + - 'The list of path rules. Use this list instead of routeRules when routing + based on simple path matching is all that''s required. The order by which + path rules are specified does not matter. Matches are always done on the + longest-path-first basis. For example: a pathRule with a path /a/b/c/* + will match before /a/b/* irrespective of the order in which those paths + appear in this list. Within a given pathMatcher, only one of pathRules + or routeRules must be set.' + returned: success + type: complex + contains: + service: + description: + - The region backend service resource to which traffic is directed if + this rule is matched. 
If routeAction is additionally specified, advanced + routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction + cannot contain any weightedBackendService s. Conversely, if routeAction + specifies any weightedBackendServices, service must not be specified. + Only one of urlRedirect, service or routeAction.weightedBackendService + must be set. + returned: success + type: dict + paths: + description: + - 'The list of path patterns to match. Each must start with / and the + only place a \\* is allowed is at the end following a /. The string + fed to the path matcher does not include any text after the first + ? or #, and those chars are not allowed here.' + returned: success + type: list + routeAction: + description: + - In response to a matching path, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction + specifies any weightedBackendServices, service must not be set. Conversely + if service is set, routeAction cannot contain any weightedBackendServices. + Only one of routeAction or urlRedirect must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing + . + returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This + translates to the Access- Control-Allow-Credentials header. + Defaults to false. + returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers + header. 
+ returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods + header. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed + origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or + allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can + be cached. This translates to the content for the Access-Control-Max-Age + header. + returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic + to test the resiliency of clients to backend service failure. + As part of fault injection, when clients send requests to a backend + service, delays can be introduced by Loadbalancer on a percentage + of requests before sending those request to the backend service. + Similarly requests from clients can be aborted by the Loadbalancer + for a percentage of requests. timeout and retry_policy will be + ignored by clients that are configured with a fault_injection_policy. + returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. 
+ returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The + value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be + from 0 to 315,576,000,000 inclusive. + returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's + backends are shadowed to a separate mirrored backend service. + Loadbalancer does not wait for responses from the shadow service. + Prior to sending traffic to the shadow service, the host / authority + header is suffixed with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The RegionBackendService resource being mirrored to. 
- Specifies the allowed number of retries. This number must be
> 0.
- Specifies the timeout for the selected route. Timeout is computed
from the time the request has been fully processed (i.e. end-of-stream)
up until the response has been completely processed. Timeout includes
all retries. If not specified, the default value is 15 seconds.
+ returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a + route match occurs. The weights determine the fraction of traffic + that flows to their corresponding backend service. If all traffic + needs to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default RegionBackendService resource. Before forwarding + the request to backendService, the loadbalancer applies any + relevant headerActions specified as part of this backendServiceWeight. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. 
- Headers to add to the response prior to sending the response
back to the client.
The value must be between + 1 and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. + - If set to false, the URL scheme of the redirected request will + remain the same as that of the request. This must only be set + for UrlMaps used in TargetHttpProxys. Setting this true for TargetHttpsProxy + is not permitted. The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of + the one that was supplied in the request. pathRedirect cannot + be supplied together with prefixRedirect. Supply one alone or + neither. If neither is supplied, the path of the original request + will be used for the redirect. + - The value must be between 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting + the request. prefixRedirect cannot be supplied together with pathRedirect. + Supply one alone or neither. If neither is supplied, the path + of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported + values are: * MOVED_PERMANENTLY_DEFAULT, which is the default + value and corresponds to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, + the request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained." 
+ returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original + URL is removed prior to redirecting the request. If set to false, + the query portion of the original URL is retained. + returned: success + type: bool + defaultUrlRedirect: + description: + - When none of the specified hostRules match, the request is redirected + to a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, + defaultService or defaultRouteAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. The value must be between 1 + and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to + https. If set to false, the URL scheme of the redirected request will + remain the same as that of the request. This must only be set for + UrlMaps used in TargetHttpProxys. Setting this true for TargetHttpsProxy + is not permitted. The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither + is supplied, the path of the original request will be used for the + redirect. The value must be between 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply + one alone or neither. If neither is supplied, the path of the original + request will be used for the redirect. 
The value must be between 1 + and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. + returned: success + type: bool + tests: + description: + - The list of expected URL mappings. Requests to update this UrlMap will succeed + only if all of the test cases pass. + returned: success + type: complex + contains: + description: + description: + - Description of this test case. + returned: success + type: str + host: + description: + - Host portion of the URL. + returned: success + type: str + path: + description: + - Path portion of the URL. + returned: success + type: str + service: + description: + - A reference to expected RegionBackendService resource the given URL should + be mapped to. + returned: success + type: dict + defaultUrlRedirect: + description: + - When none of the specified hostRules match, the request is redirected to a + URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService + or defaultRouteAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the one + that was supplied in the request. The value must be between 1 and 255 + characters. 
+ returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps used + in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. + The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the one + that was supplied in the request. pathRedirect cannot be supplied together + with prefixRedirect. Supply one alone or neither. If neither is supplied, + the path of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply one + alone or neither. If neither is supplied, the path of the original request + will be used for the redirect. The value must be between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL is + removed prior to redirecting the request. 
If set to false, the query portion + of the original URL is retained. + returned: success + type: bool + region: + description: + - A reference to the region where the url map resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/urlMaps".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_reservation.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_reservation.py new file mode 100644 index 000000000..8c82f8417 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_reservation.py @@ -0,0 +1,685 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_reservation +description: +- Represents a reservation resource. 
A reservation ensures that capacity is held in + a specific zone even if the reserved VMs are not running. +- Reservations apply only to Compute Engine, Cloud Dataproc, and Google Kubernetes + Engine VM usage.Reservations do not apply to `f1-micro` or `g1-small` machine types, + preemptible VMs, sole tenant nodes, or other services not listed above like Cloud + SQL and Dataflow. +short_description: Creates a GCP Reservation +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + specific_reservation_required: + description: + - When set to true, only VMs that target this reservation by name can consume + this reservation. Otherwise, it can be consumed by VMs with affinity for any + reservation. Defaults to false. + required: false + default: 'false' + type: bool + specific_reservation: + description: + - Reservation for instances with specific machine shapes. + required: true + type: dict + suboptions: + count: + description: + - The number of resources that are allocated. + required: true + type: int + instance_properties: + description: + - The instance properties for the reservation. 
+ required: true + type: dict + suboptions: + machine_type: + description: + - The name of the machine type to reserve. + required: true + type: str + min_cpu_platform: + description: + - The minimum CPU platform for the reservation. For example, `"Intel Skylake"`. + See U(https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#availablezones) + for information on available CPU platforms. + required: false + type: str + guest_accelerators: + description: + - Guest accelerator type and count. + elements: dict + required: false + type: list + suboptions: + accelerator_type: + description: + - 'The full or partial URL of the accelerator type to attach to this + instance. For example: `projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100` + If you are creating an instance template, specify only the accelerator + name.' + required: true + type: str + accelerator_count: + description: + - The number of the guest accelerator cards exposed to this instance. + required: true + type: int + local_ssds: + description: + - The amount of local ssd to reserve with each instance. This reserves + disks of type `local-ssd`. + elements: dict + required: false + type: list + suboptions: + interface: + description: + - The disk interface to use for attaching this disk. + - 'Some valid choices include: "SCSI", "NVME"' + required: false + default: SCSI + type: str + disk_size_gb: + description: + - The size of the disk in base-2 GB. + required: true + type: int + zone: + description: + - The zone where the reservation is made. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/reservations)' +- 'Reserving zonal resources: U(https://cloud.google.com/compute/docs/instances/reserving-zonal-resources)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a reservation + google.cloud.gcp_compute_reservation: + name: test_object + zone: us-central1-a + specific_reservation: + count: 1 + instance_properties: + min_cpu_platform: Intel Cascade Lake + machine_type: n2-standard-2 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +commitment: + description: + - Full or partial URL to a parent commitment. This field displays for reservations + that are tied to a commitment. + returned: success + type: str +specificReservationRequired: + description: + - When set to true, only VMs that target this reservation by name can consume this + reservation. Otherwise, it can be consumed by VMs with affinity for any reservation. + Defaults to false. + returned: success + type: bool +status: + description: + - The status of the reservation. + returned: success + type: str +specificReservation: + description: + - Reservation for instances with specific machine shapes. + returned: success + type: complex + contains: + count: + description: + - The number of resources that are allocated. 
+ returned: success + type: int + inUseCount: + description: + - How many instances are in use. + returned: success + type: int + instanceProperties: + description: + - The instance properties for the reservation. + returned: success + type: complex + contains: + machineType: + description: + - The name of the machine type to reserve. + returned: success + type: str + minCpuPlatform: + description: + - The minimum CPU platform for the reservation. For example, `"Intel Skylake"`. + See U(https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#availablezones) + for information on available CPU platforms. + returned: success + type: str + guestAccelerators: + description: + - Guest accelerator type and count. + returned: success + type: complex + contains: + acceleratorType: + description: + - 'The full or partial URL of the accelerator type to attach to this + instance. For example: `projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100` + If you are creating an instance template, specify only the accelerator + name.' + returned: success + type: str + acceleratorCount: + description: + - The number of the guest accelerator cards exposed to this instance. + returned: success + type: int + localSsds: + description: + - The amount of local ssd to reserve with each instance. This reserves disks + of type `local-ssd`. + returned: success + type: complex + contains: + interface: + description: + - The disk interface to use for attaching this disk. + returned: success + type: str + diskSizeGb: + description: + - The size of the disk in base-2 GB. + returned: success + type: int +zone: + description: + - The zone where the reservation is made. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + specific_reservation_required=dict(type='bool'), + specific_reservation=dict( + required=True, + type='dict', + options=dict( + count=dict(required=True, type='int'), + instance_properties=dict( + required=True, + type='dict', + options=dict( + machine_type=dict(required=True, type='str'), + min_cpu_platform=dict(type='str'), + guest_accelerators=dict( + type='list', + elements='dict', + options=dict(accelerator_type=dict(required=True, type='str'), accelerator_count=dict(required=True, type='int')), + ), + local_ssds=dict( + type='list', + elements='dict', + options=dict(interface=dict(default='SCSI', type='str'), disk_size_gb=dict(required=True, type='int')), + ), + ), + ), + ), + ), + zone=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + 
            # (continuation of main(): state == 'absent' with an existing resource)
            delete(module, self_link(module))
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module))
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


# Create the reservation: POST the serialized params to the collection URL and
# block until the returned zonal operation completes.
def create(module, link):
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


# Update an existing reservation in place, then re-read it so the module
# returns the server's view of the resource.
def update(module, link, fetch):
    update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
    return fetch_resource(module, self_link(module))


# Dispatch per-field updaters; only a change to specificReservation triggers
# an API call here — all other fields have no update handler in this module.
def update_fields(module, request, response):
    if response.get('specificReservation') != request.get('specificReservation'):
        specific_reservation_update(module, request, response)


# POST to the reservations/{name}/resize sub-endpoint to apply the new
# specificReservation.
# NOTE(review): the resize API is documented to take a new VM count; this
# generated code sends the whole specificReservation body — confirm against
# the Compute API before changing.
def specific_reservation_update(module, request, response):
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/reservations/{name}/resize"]).format(**module.params),
        {u'specificReservation': ReservationSpecificreservation(module.params.get('specific_reservation', {}), module).to_request()},
    )


# Delete the reservation and block until the zonal operation completes.
def delete(module, link):
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))


# Build the camelCase API request body from the module's snake_case params.
def resource_to_request(module):
    request = {
        u'zone': module.params.get('zone'),
        u'description': module.params.get('description'),
        u'name': module.params.get('name'),
        u'specificReservationRequired': module.params.get('specific_reservation_required'),
        u'specificReservation': ReservationSpecificreservation(module.params.get('specific_reservation', {}), module).to_request(),
    }
    return_vals = {}
    for k, v in request.items():
        # Drop unset/empty values but keep an explicit boolean False.
        if v or v is False:
            return_vals[k] = v

    return return_vals


# GET a single resource; returns None on 404 when allow_not_found is True.
def fetch_resource(module, link, allow_not_found=True):
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), allow_not_found)


# URL of one reservation (zonal resource).
def self_link(module):
    return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/reservations/{name}".format(**module.params)


# URL of the reservations collection for this project/zone.
def collection(module):
    return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/reservations".format(**module.params)


# Turn an HTTP response into a parsed JSON dict.
# Returns None for 404 (when allowed) and 204; fails the module on HTTP
# errors, unparseable JSON, or an error payload embedded in the body.
def return_if_object(module, response, allow_not_found=False):
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    # json.decoder.JSONDecodeError exists on Python 3; getattr falls back to
    # ValueError (its base class) on Python 2.
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


# True when desired state differs from current state.  Only keys present on
# BOTH sides are compared, so output-only response fields and unset params
# never cause a perpetual diff.
def is_different(module, response):
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
# Flatten an API response into the comparable dict used by is_different().
# Includes output-only fields (creationTimestamp, id, status); is_different()
# ignores them because they never appear in the request side.
def response_to_hash(module, response):
    return {
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'description': response.get(u'description'),
        u'id': response.get(u'id'),
        u'name': response.get(u'name'),
        u'commitment': response.get(u'commitment'),
        u'specificReservationRequired': response.get(u'specificReservationRequired'),
        u'status': response.get(u'status'),
        u'specificReservation': ReservationSpecificreservation(response.get(u'specificReservation', {}), module).from_response(),
    }


# URL of a zonal operation; op_id comes in via extra_data.  Note that
# module.params is merged last, so params win on any key collision.
def async_op_url(module, extra_data=None):
    if extra_data is None:
        extra_data = {}
    url = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


# Unwrap an operation response and block until it is DONE, then fetch and
# return the operation's target resource ({} when the response was empty).
def wait_for_operation(module, response):
    op_result = return_if_object(module, response)
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']))


# Poll the operation once per second until status == 'DONE', failing the
# module as soon as the operation reports errors.
def wait_for_completion(status, op_result, module):
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, False)
        status = navigate_hash(op_result, ['status'])
    return op_result


# Fail the module if the operation payload carries errors at err_path.
def raise_if_errors(response, err_path, module):
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


# Serializer for the specificReservation block: converts between Ansible
# snake_case params (to_request) and API camelCase fields (from_response).
class ReservationSpecificreservation(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'count': self.request.get('count'),
                u'instanceProperties': ReservationInstanceproperties(self.request.get('instance_properties', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        # NOTE(review): instanceProperties is rebuilt from module.params via
        # to_request() instead of flattening the API response — this makes the
        # diff compare desired-vs-desired for that subtree (API-normalized
        # values like full machineType URLs never show as changes).  Appears
        # to be a deliberate MMv1 generator pattern; confirm before changing.
        return remove_nones_from_dict(
            {
                u'count': self.request.get(u'count'),
                u'instanceProperties': ReservationInstanceproperties(self.module.params.get('instance_properties', {}), self.module).to_request(),
            }
        )


# Serializer for specificReservation.instanceProperties.
class ReservationInstanceproperties(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'machineType': self.request.get('machine_type'),
                u'minCpuPlatform': self.request.get('min_cpu_platform'),
                u'guestAccelerators': ReservationGuestacceleratorsArray(self.request.get('guest_accelerators', []), self.module).to_request(),
                u'localSsds': ReservationLocalssdsArray(self.request.get('local_ssds', []), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'machineType': self.request.get(u'machineType'),
                u'minCpuPlatform': self.request.get(u'minCpuPlatform'),
                u'guestAccelerators': ReservationGuestacceleratorsArray(self.request.get(u'guestAccelerators', []), self.module).from_response(),
                u'localSsds': ReservationLocalssdsArray(self.request.get(u'localSsds', []), self.module).from_response(),
            }
        )


# List serializer for instanceProperties.guestAccelerators.
class ReservationGuestacceleratorsArray(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'acceleratorType': item.get('accelerator_type'), u'acceleratorCount': item.get('accelerator_count')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'acceleratorType': item.get(u'acceleratorType'), u'acceleratorCount': item.get(u'acceleratorCount')})


# List serializer for instanceProperties.localSsds.
class ReservationLocalssdsArray(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'interface': item.get('interface'), u'diskSizeGb': item.get('disk_size_gb')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'interface': item.get(u'interface'), u'diskSizeGb': item.get(u'diskSizeGb')})


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_reservation_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_reservation_info.py
new file mode 100644
index 000000000..ee9ae46ba
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_reservation_info.py
@@ -0,0 +1,312 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** Type: MMv1 ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_reservation_info +description: +- Gather info for GCP Reservation +short_description: Gather info for GCP Reservation +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + zone: + description: + - The zone where the reservation is made. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a reservation + gcp_compute_reservation_info: + zone: us-central1-a + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + commitment: + description: + - Full or partial URL to a parent commitment. This field displays for reservations + that are tied to a commitment. + returned: success + type: str + specificReservationRequired: + description: + - When set to true, only VMs that target this reservation by name can consume + this reservation. Otherwise, it can be consumed by VMs with affinity for any + reservation. Defaults to false. + returned: success + type: bool + status: + description: + - The status of the reservation. + returned: success + type: str + specificReservation: + description: + - Reservation for instances with specific machine shapes. + returned: success + type: complex + contains: + count: + description: + - The number of resources that are allocated. + returned: success + type: int + inUseCount: + description: + - How many instances are in use. + returned: success + type: int + instanceProperties: + description: + - The instance properties for the reservation. + returned: success + type: complex + contains: + machineType: + description: + - The name of the machine type to reserve. + returned: success + type: str + minCpuPlatform: + description: + - The minimum CPU platform for the reservation. For example, `"Intel + Skylake"`. See U(https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#availablezones) + for information on available CPU platforms. + returned: success + type: str + guestAccelerators: + description: + - Guest accelerator type and count. + returned: success + type: complex + contains: + acceleratorType: + description: + - 'The full or partial URL of the accelerator type to attach to + this instance. 
For example: `projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100` + If you are creating an instance template, specify only the accelerator + name.' + returned: success + type: str + acceleratorCount: + description: + - The number of the guest accelerator cards exposed to this instance. + returned: success + type: int + localSsds: + description: + - The amount of local ssd to reserve with each instance. This reserves + disks of type `local-ssd`. + returned: success + type: complex + contains: + interface: + description: + - The disk interface to use for attaching this disk. + returned: success + type: str + diskSizeGb: + description: + - The size of the disk in base-2 GB. + returned: success + type: int + zone: + description: + - The zone where the reservation is made. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/reservations".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, 
return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_resource_policy.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_resource_policy.py new file mode 100644 index 000000000..5a668ce09 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_resource_policy.py @@ -0,0 +1,1023 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_resource_policy +description: +- A policy that can be attached to a resource to specify or schedule actions on that + resource. +short_description: Creates a GCP ResourcePolicy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The name of the resource, provided by the client when initially creating the + resource. The resource name must be 1-63 characters long, and comply with RFC1035. + Specifically, the name must be 1-63 characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])`? which means the first character must be a lowercase + letter, and all following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + snapshot_schedule_policy: + description: + - Policy for creating snapshots of persistent disks. 
+ required: false + type: dict + suboptions: + schedule: + description: + - Contains one of an `hourlySchedule`, `dailySchedule`, or `weeklySchedule`. + required: true + type: dict + suboptions: + hourly_schedule: + description: + - The policy will execute every nth hour starting at the specified time. + required: false + type: dict + suboptions: + hours_in_cycle: + description: + - The number of hours between snapshots. + required: true + type: int + start_time: + description: + - Time within the window to start the operations. + - 'It must be in an hourly format "HH:MM", where HH : [00-23] and + MM : [00] GMT.' + - 'eg: 21:00 .' + required: true + type: str + daily_schedule: + description: + - The policy will execute every nth day at the specified time. + required: false + type: dict + suboptions: + days_in_cycle: + description: + - The number of days between snapshots. + required: true + type: int + start_time: + description: + - This must be in UTC format that resolves to one of 00:00, 04:00, + 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 + are valid. + required: true + type: str + weekly_schedule: + description: + - Allows specifying a snapshot time for each day of the week. + required: false + type: dict + suboptions: + day_of_weeks: + description: + - May contain up to seven (one for each day of the week) snapshot + times. + elements: dict + required: true + type: list + suboptions: + start_time: + description: + - Time within the window to start the operations. + - 'It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] + GMT.' + required: true + type: str + day: + description: + - The day of the week to create the snapshot. e.g. MONDAY . + - 'Some valid choices include: "MONDAY", "TUESDAY", "WEDNESDAY", + "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"' + required: true + type: str + retention_policy: + description: + - Retention policy applied to snapshots created by this resource policy. 
+ required: false + type: dict + suboptions: + max_retention_days: + description: + - Maximum age of the snapshot that is allowed to be kept. + required: true + type: int + on_source_disk_delete: + description: + - Specifies the behavior to apply to scheduled snapshots when the source + disk is deleted. + - 'Some valid choices include: "KEEP_AUTO_SNAPSHOTS", "APPLY_RETENTION_POLICY"' + required: false + default: KEEP_AUTO_SNAPSHOTS + type: str + snapshot_properties: + description: + - Properties with which the snapshots are created, such as labels. + required: false + type: dict + suboptions: + labels: + description: + - A set of key-value pairs. + required: false + type: dict + storage_locations: + description: + - Cloud Storage bucket location to store the auto snapshot (regional or + multi-regional) . + elements: str + required: false + type: list + guest_flush: + description: + - Whether to perform a 'guest aware' snapshot. + required: false + type: bool + group_placement_policy: + description: + - Resource policy for instances used for placement configuration. + required: false + type: dict + suboptions: + vm_count: + description: + - Number of vms in this placement group. + required: false + type: int + availability_domain_count: + description: + - The number of availability domains instances will be spread across. If two + instances are in different availability domain, they will not be put in + the same low latency network . + required: false + type: int + collocation: + description: + - Collocation specifies whether to place VMs inside the same availability + domain on the same low-latency network. + - Specify `COLLOCATED` to enable collocation. Can only be specified with `vm_count`. + If compute instances are created with a COLLOCATED policy, then exactly + `vm_count` instances must be created at the same time with the resource + policy attached. 
+ - 'Some valid choices include: "COLLOCATED"' + required: false + type: str + instance_schedule_policy: + description: + - Resource policy for scheduling instance operations. + required: false + type: dict + suboptions: + vm_start_schedule: + description: + - Specifies the schedule for starting instances. + required: false + type: dict + suboptions: + schedule: + description: + - Specifies the frequency for the operation, using the unix-cron format. + required: true + type: str + vm_stop_schedule: + description: + - Specifies the schedule for stopping instances. + required: false + type: dict + suboptions: + schedule: + description: + - Specifies the frequency for the operation, using the unix-cron format. + required: true + type: str + time_zone: + description: + - 'Specifies the time zone to be used in interpreting the schedule. The value + of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.' + required: true + type: str + start_time: + description: + - The start time of the schedule. The timestamp is an RFC3339 string. + required: false + type: str + expiration_time: + description: + - The expiration time of the schedule. The timestamp is an RFC3339 string. + required: false + type: str + region: + description: + - Region where resource policy resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a resource policy + google.cloud.gcp_compute_resource_policy: + name: test_object + region: us-central1 + snapshot_schedule_policy: + schedule: + daily_schedule: + days_in_cycle: 1 + start_time: '04:00' + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The name of the resource, provided by the client when initially creating the resource. + The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])`? + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +snapshotSchedulePolicy: + description: + - Policy for creating snapshots of persistent disks. + returned: success + type: complex + contains: + schedule: + description: + - Contains one of an `hourlySchedule`, `dailySchedule`, or `weeklySchedule`. + returned: success + type: complex + contains: + hourlySchedule: + description: + - The policy will execute every nth hour starting at the specified time. 
+ returned: success + type: complex + contains: + hoursInCycle: + description: + - The number of hours between snapshots. + returned: success + type: int + startTime: + description: + - Time within the window to start the operations. + - 'It must be in an hourly format "HH:MM", where HH : [00-23] and MM + : [00] GMT.' + - 'eg: 21:00 .' + returned: success + type: str + dailySchedule: + description: + - The policy will execute every nth day at the specified time. + returned: success + type: complex + contains: + daysInCycle: + description: + - The number of days between snapshots. + returned: success + type: int + startTime: + description: + - This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, + 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. + returned: success + type: str + weeklySchedule: + description: + - Allows specifying a snapshot time for each day of the week. + returned: success + type: complex + contains: + dayOfWeeks: + description: + - May contain up to seven (one for each day of the week) snapshot times. + returned: success + type: complex + contains: + startTime: + description: + - Time within the window to start the operations. + - 'It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] + GMT.' + returned: success + type: str + day: + description: + - The day of the week to create the snapshot. e.g. MONDAY . + returned: success + type: str + retentionPolicy: + description: + - Retention policy applied to snapshots created by this resource policy. + returned: success + type: complex + contains: + maxRetentionDays: + description: + - Maximum age of the snapshot that is allowed to be kept. + returned: success + type: int + onSourceDiskDelete: + description: + - Specifies the behavior to apply to scheduled snapshots when the source + disk is deleted. + returned: success + type: str + snapshotProperties: + description: + - Properties with which the snapshots are created, such as labels. 
+ returned: success + type: complex + contains: + labels: + description: + - A set of key-value pairs. + returned: success + type: dict + storageLocations: + description: + - Cloud Storage bucket location to store the auto snapshot (regional or + multi-regional) . + returned: success + type: list + guestFlush: + description: + - Whether to perform a 'guest aware' snapshot. + returned: success + type: bool +groupPlacementPolicy: + description: + - Resource policy for instances used for placement configuration. + returned: success + type: complex + contains: + vmCount: + description: + - Number of vms in this placement group. + returned: success + type: int + availabilityDomainCount: + description: + - The number of availability domains instances will be spread across. If two + instances are in different availability domain, they will not be put in the + same low latency network . + returned: success + type: int + collocation: + description: + - Collocation specifies whether to place VMs inside the same availability domain + on the same low-latency network. + - Specify `COLLOCATED` to enable collocation. Can only be specified with `vm_count`. + If compute instances are created with a COLLOCATED policy, then exactly `vm_count` + instances must be created at the same time with the resource policy attached. + returned: success + type: str +instanceSchedulePolicy: + description: + - Resource policy for scheduling instance operations. + returned: success + type: complex + contains: + vmStartSchedule: + description: + - Specifies the schedule for starting instances. + returned: success + type: complex + contains: + schedule: + description: + - Specifies the frequency for the operation, using the unix-cron format. + returned: success + type: str + vmStopSchedule: + description: + - Specifies the schedule for stopping instances. 
+ returned: success + type: complex + contains: + schedule: + description: + - Specifies the frequency for the operation, using the unix-cron format. + returned: success + type: str + timeZone: + description: + - 'Specifies the time zone to be used in interpreting the schedule. The value + of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.' + returned: success + type: str + startTime: + description: + - The start time of the schedule. The timestamp is an RFC3339 string. + returned: success + type: str + expirationTime: + description: + - The expiration time of the schedule. The timestamp is an RFC3339 string. + returned: success + type: str +region: + description: + - Region where resource policy resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + snapshot_schedule_policy=dict( + type='dict', + options=dict( + schedule=dict( + required=True, + type='dict', + options=dict( + hourly_schedule=dict( + type='dict', options=dict(hours_in_cycle=dict(required=True, type='int'), start_time=dict(required=True, type='str')) + ), + daily_schedule=dict( + type='dict', options=dict(days_in_cycle=dict(required=True, type='int'), 
start_time=dict(required=True, type='str')) + ), + weekly_schedule=dict( + type='dict', + options=dict( + day_of_weeks=dict( + required=True, + type='list', + elements='dict', + options=dict(start_time=dict(required=True, type='str'), day=dict(required=True, type='str')), + ) + ), + ), + ), + ), + retention_policy=dict( + type='dict', + options=dict(max_retention_days=dict(required=True, type='int'), on_source_disk_delete=dict(default='KEEP_AUTO_SNAPSHOTS', type='str')), + ), + snapshot_properties=dict( + type='dict', options=dict(labels=dict(type='dict'), storage_locations=dict(type='list', elements='str'), guest_flush=dict(type='bool')) + ), + ), + ), + group_placement_policy=dict( + type='dict', options=dict(vm_count=dict(type='int'), availability_domain_count=dict(type='int'), collocation=dict(type='str')) + ), + instance_schedule_policy=dict( + type='dict', + options=dict( + vm_start_schedule=dict(type='dict', options=dict(schedule=dict(required=True, type='str'))), + vm_stop_schedule=dict(type='dict', options=dict(schedule=dict(required=True, type='str'))), + time_zone=dict(required=True, type='str'), + start_time=dict(type='str'), + expiration_time=dict(type='str'), + ), + ), + region=dict(required=True, type='str'), + ), + mutually_exclusive=[['group_placement_policy', 'instance_schedule_policy', 'snapshot_schedule_policy']], + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#resourcePolicy' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + 
fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), kind) + create(module, collection(module), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#resourcePolicy', + u'region': module.params.get('region'), + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'snapshotSchedulePolicy': ResourcePolicySnapshotschedulepolicy(module.params.get('snapshot_schedule_policy', {}), module).to_request(), + u'groupPlacementPolicy': ResourcePolicyGroupplacementpolicy(module.params.get('group_placement_policy', {}), module).to_request(), + u'instanceSchedulePolicy': ResourcePolicyInstanceschedulepolicy(module.params.get('instance_schedule_policy', {}), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/resourcePolicies".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'name': response.get(u'name'), + u'description': response.get(u'description'), + u'snapshotSchedulePolicy': ResourcePolicySnapshotschedulepolicy(response.get(u'snapshotSchedulePolicy', {}), module).from_response(), + u'groupPlacementPolicy': ResourcePolicyGroupplacementpolicy(response.get(u'groupPlacementPolicy', {}), module).from_response(), + u'instanceSchedulePolicy': ResourcePolicyInstanceschedulepolicy(response.get(u'instanceSchedulePolicy', {}), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = 
wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#resourcePolicy') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class ResourcePolicySnapshotschedulepolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'schedule': ResourcePolicySchedule(self.request.get('schedule', {}), self.module).to_request(), + u'retentionPolicy': ResourcePolicyRetentionpolicy(self.request.get('retention_policy', {}), self.module).to_request(), + u'snapshotProperties': ResourcePolicySnapshotproperties(self.request.get('snapshot_properties', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'schedule': ResourcePolicySchedule(self.request.get(u'schedule', {}), self.module).from_response(), + u'retentionPolicy': ResourcePolicyRetentionpolicy(self.request.get(u'retentionPolicy', {}), self.module).from_response(), + u'snapshotProperties': ResourcePolicySnapshotproperties(self.request.get(u'snapshotProperties', {}), self.module).from_response(), + } + ) + + +class ResourcePolicySchedule(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'hourlySchedule': 
ResourcePolicyHourlyschedule(self.request.get('hourly_schedule', {}), self.module).to_request(), + u'dailySchedule': ResourcePolicyDailyschedule(self.request.get('daily_schedule', {}), self.module).to_request(), + u'weeklySchedule': ResourcePolicyWeeklyschedule(self.request.get('weekly_schedule', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'hourlySchedule': ResourcePolicyHourlyschedule(self.request.get(u'hourlySchedule', {}), self.module).from_response(), + u'dailySchedule': ResourcePolicyDailyschedule(self.request.get(u'dailySchedule', {}), self.module).from_response(), + u'weeklySchedule': ResourcePolicyWeeklyschedule(self.request.get(u'weeklySchedule', {}), self.module).from_response(), + } + ) + + +class ResourcePolicyHourlyschedule(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'hoursInCycle': self.request.get('hours_in_cycle'), u'startTime': self.request.get('start_time')}) + + def from_response(self): + return remove_nones_from_dict({u'hoursInCycle': self.request.get(u'hoursInCycle'), u'startTime': self.request.get(u'startTime')}) + + +class ResourcePolicyDailyschedule(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'daysInCycle': self.request.get('days_in_cycle'), u'startTime': self.request.get('start_time')}) + + def from_response(self): + return remove_nones_from_dict({u'daysInCycle': self.request.get(u'daysInCycle'), u'startTime': self.request.get(u'startTime')}) + + +class ResourcePolicyWeeklyschedule(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return 
remove_nones_from_dict({u'dayOfWeeks': ResourcePolicyDayofweeksArray(self.request.get('day_of_weeks', []), self.module).to_request()}) + + def from_response(self): + return remove_nones_from_dict({u'dayOfWeeks': ResourcePolicyDayofweeksArray(self.request.get(u'dayOfWeeks', []), self.module).from_response()}) + + +class ResourcePolicyDayofweeksArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'startTime': item.get('start_time'), u'day': item.get('day')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'startTime': item.get(u'startTime'), u'day': item.get(u'day')}) + + +class ResourcePolicyRetentionpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'maxRetentionDays': self.request.get('max_retention_days'), u'onSourceDiskDelete': self.request.get('on_source_disk_delete')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'maxRetentionDays': self.request.get(u'maxRetentionDays'), u'onSourceDiskDelete': self.request.get(u'onSourceDiskDelete')} + ) + + +class ResourcePolicySnapshotproperties(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'labels': self.request.get('labels'), u'storageLocations': self.request.get('storage_locations'), u'guestFlush': self.request.get('guest_flush')} + ) + + def 
from_response(self): + return remove_nones_from_dict( + {u'labels': self.request.get(u'labels'), u'storageLocations': self.request.get(u'storageLocations'), u'guestFlush': self.request.get(u'guestFlush')} + ) + + +class ResourcePolicyGroupplacementpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'vmCount': self.request.get('vm_count'), + u'availabilityDomainCount': self.request.get('availability_domain_count'), + u'collocation': self.request.get('collocation'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'vmCount': self.request.get(u'vmCount'), + u'availabilityDomainCount': self.request.get(u'availabilityDomainCount'), + u'collocation': self.request.get(u'collocation'), + } + ) + + +class ResourcePolicyInstanceschedulepolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'vmStartSchedule': ResourcePolicyVmstartschedule(self.request.get('vm_start_schedule', {}), self.module).to_request(), + u'vmStopSchedule': ResourcePolicyVmstopschedule(self.request.get('vm_stop_schedule', {}), self.module).to_request(), + u'timeZone': self.request.get('time_zone'), + u'startTime': self.request.get('start_time'), + u'expirationTime': self.request.get('expiration_time'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'vmStartSchedule': ResourcePolicyVmstartschedule(self.request.get(u'vmStartSchedule', {}), self.module).from_response(), + u'vmStopSchedule': ResourcePolicyVmstopschedule(self.request.get(u'vmStopSchedule', {}), self.module).from_response(), + u'timeZone': self.request.get(u'timeZone'), + u'startTime': self.request.get(u'startTime'), + u'expirationTime': self.request.get(u'expirationTime'), + } 
+ ) + + +class ResourcePolicyVmstartschedule(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'schedule': self.request.get('schedule')}) + + def from_response(self): + return remove_nones_from_dict({u'schedule': self.request.get(u'schedule')}) + + +class ResourcePolicyVmstopschedule(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'schedule': self.request.get('schedule')}) + + def from_response(self): + return remove_nones_from_dict({u'schedule': self.request.get(u'schedule')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_resource_policy_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_resource_policy_info.py new file mode 100644 index 000000000..1aeb54766 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_resource_policy_info.py @@ -0,0 +1,400 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_resource_policy_info +description: +- Gather info for GCP ResourcePolicy +short_description: Gather info for GCP ResourcePolicy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - Region where resource policy resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email.
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a resource policy + gcp_compute_resource_policy_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of the resource, provided by the client when initially creating the + resource. The resource name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + description: + description: + - An optional description of this resource.
Provide this property when you create + the resource. + returned: success + type: str + snapshotSchedulePolicy: + description: + - Policy for creating snapshots of persistent disks. + returned: success + type: complex + contains: + schedule: + description: + - Contains one of an `hourlySchedule`, `dailySchedule`, or `weeklySchedule`. + returned: success + type: complex + contains: + hourlySchedule: + description: + - The policy will execute every nth hour starting at the specified time. + returned: success + type: complex + contains: + hoursInCycle: + description: + - The number of hours between snapshots. + returned: success + type: int + startTime: + description: + - Time within the window to start the operations. + - 'It must be in an hourly format "HH:MM", where HH : [00-23] and + MM : [00] GMT.' + - 'eg: 21:00 .' + returned: success + type: str + dailySchedule: + description: + - The policy will execute every nth day at the specified time. + returned: success + type: complex + contains: + daysInCycle: + description: + - The number of days between snapshots. + returned: success + type: int + startTime: + description: + - This must be in UTC format that resolves to one of 00:00, 04:00, + 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 + are valid. + returned: success + type: str + weeklySchedule: + description: + - Allows specifying a snapshot time for each day of the week. + returned: success + type: complex + contains: + dayOfWeeks: + description: + - May contain up to seven (one for each day of the week) snapshot + times. + returned: success + type: complex + contains: + startTime: + description: + - Time within the window to start the operations. + - 'It must be in format "HH:MM", where HH : [00-23] and MM : + [00-00] GMT.' + returned: success + type: str + day: + description: + - The day of the week to create the snapshot. e.g. MONDAY . 
+ returned: success + type: str + retentionPolicy: + description: + - Retention policy applied to snapshots created by this resource policy. + returned: success + type: complex + contains: + maxRetentionDays: + description: + - Maximum age of the snapshot that is allowed to be kept. + returned: success + type: int + onSourceDiskDelete: + description: + - Specifies the behavior to apply to scheduled snapshots when the source + disk is deleted. + returned: success + type: str + snapshotProperties: + description: + - Properties with which the snapshots are created, such as labels. + returned: success + type: complex + contains: + labels: + description: + - A set of key-value pairs. + returned: success + type: dict + storageLocations: + description: + - Cloud Storage bucket location to store the auto snapshot (regional + or multi-regional) . + returned: success + type: list + guestFlush: + description: + - Whether to perform a 'guest aware' snapshot. + returned: success + type: bool + groupPlacementPolicy: + description: + - Resource policy for instances used for placement configuration. + returned: success + type: complex + contains: + vmCount: + description: + - Number of vms in this placement group. + returned: success + type: int + availabilityDomainCount: + description: + - The number of availability domains instances will be spread across. If + two instances are in different availability domain, they will not be put + in the same low latency network . + returned: success + type: int + collocation: + description: + - Collocation specifies whether to place VMs inside the same availability + domain on the same low-latency network. + - Specify `COLLOCATED` to enable collocation. Can only be specified with + `vm_count`. If compute instances are created with a COLLOCATED policy, + then exactly `vm_count` instances must be created at the same time with + the resource policy attached. 
+ returned: success + type: str + instanceSchedulePolicy: + description: + - Resource policy for scheduling instance operations. + returned: success + type: complex + contains: + vmStartSchedule: + description: + - Specifies the schedule for starting instances. + returned: success + type: complex + contains: + schedule: + description: + - Specifies the frequency for the operation, using the unix-cron format. + returned: success + type: str + vmStopSchedule: + description: + - Specifies the schedule for stopping instances. + returned: success + type: complex + contains: + schedule: + description: + - Specifies the frequency for the operation, using the unix-cron format. + returned: success + type: str + timeZone: + description: + - 'Specifies the time zone to be used in interpreting the schedule. The + value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.' + returned: success + type: str + startTime: + description: + - The start time of the schedule. The timestamp is an RFC3339 string. + returned: success + type: str + expirationTime: + description: + - The expiration time of the schedule. The timestamp is an RFC3339 string. + returned: success + type: str + region: + description: + - Region where resource policy resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/resourcePolicies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_route.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_route.py new file mode 100644 index 000000000..3da7d0fa6 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_route.py @@ -0,0 +1,537 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_route +description: +- Represents a Route resource. 
+- A route is a rule that specifies how certain packets should be handled by the virtual + network. Routes are associated with virtual machines by tag, and the set of routes + for a particular virtual machine is called its routing table. For each packet leaving + a virtual machine, the system searches that virtual machine's routing table for + a single best matching route. +- Routes match packets by destination IP address, preferring smaller or more specific + ranges over larger ones. If there is a tie, the system selects the route with the + smallest priority value. If there is still a tie, it uses the layer three and four + packet headers to select just one of the remaining matching routes. The packet is + then forwarded as specified by the next_hop field of the winning route -- either + to another virtual machine destination, a virtual machine gateway or a Compute Engine-operated + gateway. Packets that do not match any route in the sending virtual machine's routing + table will be dropped. +- A Route resource must have exactly one specification of either nextHopGateway, nextHopInstance, + nextHopIp, nextHopVpnTunnel, or nextHopIlb. +short_description: Creates a GCP Route +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + dest_range: + description: + - The destination range of outgoing packets that this route applies to. + - Only IPv4 is supported. + required: true + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + network: + description: + - The network that this route applies to. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: true + type: dict + priority: + description: + - The priority of this route. Priority is used to break ties in cases where there + is more than one matching route of equal prefix length. + - In the case of two routes with equal prefix length, the one with the lowest-numbered + priority value wins. + - Default value is 1000. Valid range is 0 through 65535. + required: false + type: int + tags: + description: + - A list of instance tags to which this route applies. + elements: str + required: false + type: list + next_hop_gateway: + description: + - URL to a gateway that should handle matching packets. + - 'Currently, you can only specify the internet gateway, using a full or partial + valid URL: * U(https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway) + * projects/project/global/gateways/default-internet-gateway * global/gateways/default-internet-gateway + .' + required: false + type: str + next_hop_instance: + description: + - URL to an instance that should handle matching packets. + - 'You can specify this as a full or partial URL. 
For example: * U(https://www.googleapis.com/compute/v1/projects/project/zones/zone/) + instances/instance * projects/project/zones/zone/instances/instance * zones/zone/instances/instance + .' + - 'This field represents a link to a Instance resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_instance task and then set this next_hop_instance field to + "{{ name-of-resource }}"' + required: false + type: dict + next_hop_ip: + description: + - Network IP address of an instance that should handle matching packets. + required: false + type: str + next_hop_vpn_tunnel: + description: + - URL to a VpnTunnel that should handle matching packets. + - 'This field represents a link to a VpnTunnel resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_vpn_tunnel task and then set this next_hop_vpn_tunnel field + to "{{ name-of-resource }}"' + required: false + type: dict + next_hop_ilb: + description: + - The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should + handle matching packets. + - 'You can only specify the forwarding rule as a partial or full URL. For example, + the following are all valid URLs: U(https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule) + regions/region/forwardingRules/forwardingRule Note that this can only be used + when the destinationRange is a public (non-RFC 1918) IP CIDR range.' + - 'This field represents a link to a ForwardingRule resource in GCP. It can be + specified in two ways. 
First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_forwarding_rule task and then set this next_hop_ilb + field to "{{ name-of-resource }}"' + required: false + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/routes)' +- 'Using Routes: U(https://cloud.google.com/vpc/docs/using-routes)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. 
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-route + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a route + google.cloud.gcp_compute_route: + name: test_object + dest_range: 192.168.6.0/24 + next_hop_gateway: global/gateways/default-internet-gateway + network: "{{ network }}" + tags: + - backends + - databases + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +destRange: + description: + - The destination range of outgoing packets that this route applies to. + - Only IPv4 is supported. + returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +network: + description: + - The network that this route applies to. + returned: success + type: dict +priority: + description: + - The priority of this route. Priority is used to break ties in cases where there + is more than one matching route of equal prefix length. 
+ - In the case of two routes with equal prefix length, the one with the lowest-numbered + priority value wins. + - Default value is 1000. Valid range is 0 through 65535. + returned: success + type: int +tags: + description: + - A list of instance tags to which this route applies. + returned: success + type: list +nextHopGateway: + description: + - URL to a gateway that should handle matching packets. + - 'Currently, you can only specify the internet gateway, using a full or partial + valid URL: * U(https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway) + * projects/project/global/gateways/default-internet-gateway * global/gateways/default-internet-gateway + .' + returned: success + type: str +nextHopInstance: + description: + - URL to an instance that should handle matching packets. + - 'You can specify this as a full or partial URL. For example: * U(https://www.googleapis.com/compute/v1/projects/project/zones/zone/) + instances/instance * projects/project/zones/zone/instances/instance * zones/zone/instances/instance + .' + returned: success + type: dict +nextHopIp: + description: + - Network IP address of an instance that should handle matching packets. + returned: success + type: str +nextHopVpnTunnel: + description: + - URL to a VpnTunnel that should handle matching packets. + returned: success + type: dict +nextHopNetwork: + description: + - URL to a Network that should handle matching packets. + returned: success + type: str +nextHopIlb: + description: + - The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should + handle matching packets. + - 'You can only specify the forwarding rule as a partial or full URL. 
 For example,
    the following are all valid URLs: U(https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule)
    regions/region/forwardingRules/forwardingRule Note that this can only be used
    when the destinationRange is a public (non-RFC 1918) IP CIDR range.'
  returned: success
  type: dict
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time

################################################################################
# Main
################################################################################


def main():
    """Reconcile a GCP Compute Route with the desired module parameters.

    Fetches the current state of the route, then creates, replaces, or
    deletes it so that the live resource matches ``state`` and the other
    module parameters.  Exits via ``module.exit_json`` with the resulting
    resource representation plus a ``changed`` flag.
    """

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            dest_range=dict(required=True, type='str'),
            description=dict(type='str'),
            name=dict(required=True, type='str'),
            network=dict(required=True, type='dict'),
            priority=dict(type='int'),
            tags=dict(type='list', elements='str'),
            next_hop_gateway=dict(type='str'),
            next_hop_instance=dict(type='dict'),
            next_hop_ip=dict(type='str'),
            next_hop_vpn_tunnel=dict(type='dict'),
            next_hop_ilb=dict(type='dict'),
        )
    )

    # Default to the full compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#route'

    # fetch is None when the route does not exist yet (404 is allowed here).
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                # Replace the existing route, then re-read it so the module
                # returns the post-update representation.
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link, kind):
    """POST the route to the collection URL and wait for the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


def update(module, link, kind):
    """Replace the route by deleting and re-creating it.

    Implemented as delete + create (presumably because the API offers no
    in-place update for routes -- TODO confirm).  No value is returned;
    main() re-fetches the resource immediately afterwards.
    """
    delete(module, self_link(module), kind)
    create(module, collection(module), kind)


def delete(module, link, kind):
    """DELETE the route at its self link and wait for the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))


def resource_to_request(module):
    """Build the API request body from the module parameters.

    Resource-reference parameters (network, next_hop_instance, ...) are
    collapsed to their 'selfLink' URL via replace_resource_dict.  Keys with
    falsy values are dropped, except an explicit False, which is kept.
    """
    request = {
        u'kind': 'compute#route',
        u'destRange': module.params.get('dest_range'),
        u'description': module.params.get('description'),
        u'name': module.params.get('name'),
        u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
        u'priority': module.params.get('priority'),
        u'tags': module.params.get('tags'),
        u'nextHopGateway': module.params.get('next_hop_gateway'),
        u'nextHopInstance': replace_resource_dict(module.params.get(u'next_hop_instance', {}), 'selfLink'),
        u'nextHopIp': module.params.get('next_hop_ip'),
        u'nextHopVpnTunnel': replace_resource_dict(module.params.get(u'next_hop_vpn_tunnel', {}), 'selfLink'),
        u'nextHopIlb': replace_resource_dict(module.params.get(u'next_hop_ilb', {}), 'selfLink'),
    }
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET ``link`` and return the parsed resource (None on an allowed 404)."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    """Return the canonical URL of this route resource."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/routes/{name}".format(**module.params)


def collection(module):
    """Return the URL of the project-wide routes collection."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/routes".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response, failing the module on any error.

    Returns None for an allowed 404 or an empty 204 response; otherwise
    returns the parsed JSON body, calling ``module.fail_json`` on HTTP
    errors, invalid JSON, or an embedded error payload.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True when the requested state differs from the live resource.

    Only keys present on both sides are compared, so output-only response
    fields and unset module parameters are ignored.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Normalize an API response for comparison against the request.

    NOTE(review): most fields are taken from ``module.params`` rather than
    from ``response`` (presumably because the API canonicalizes URLs and
    would never match user input verbatim -- confirm against the generator);
    only destRange, description, name and nextHopNetwork are read from the
    live resource.
    """
    return {
        u'destRange': response.get(u'destRange'),
        u'description': response.get(u'description'),
        u'name': response.get(u'name'),
        u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
        u'priority': module.params.get('priority'),
        u'tags': module.params.get('tags'),
        u'nextHopGateway': module.params.get('next_hop_gateway'),
        u'nextHopInstance': replace_resource_dict(module.params.get(u'next_hop_instance', {}), 'selfLink'),
        u'nextHopIp': module.params.get('next_hop_ip'),
        u'nextHopVpnTunnel': replace_resource_dict(module.params.get(u'next_hop_vpn_tunnel', {}), 'selfLink'),
        u'nextHopNetwork': response.get(u'nextHopNetwork'),
        u'nextHopIlb': replace_resource_dict(module.params.get(u'next_hop_ilb', {}), 'selfLink'),
    }


def async_op_url(module, extra_data=None):
    """Build the URL of a global compute operation; ``extra_data`` supplies op_id."""
    if extra_data is None:
        extra_data = {}
    url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    """Block until the operation in ``response`` finishes, then fetch the route."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#route')


def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until its status is DONE.

    Fails the module as soon as the operation reports errors.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module if ``response`` carries errors under ``err_path``."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)
+ + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_route_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_route_info.py new file mode 100644 index 000000000..034a3155e --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_route_info.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_route_info +description: +- Gather info for GCP Route +short_description: Gather info for GCP Route +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . 
+ type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a route + gcp_compute_route_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + destRange: + description: + - The destination range of outgoing packets that this route applies to. + - Only IPv4 is supported. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + network: + description: + - The network that this route applies to. + returned: success + type: dict + priority: + description: + - The priority of this route. Priority is used to break ties in cases where + there is more than one matching route of equal prefix length. + - In the case of two routes with equal prefix length, the one with the lowest-numbered + priority value wins. + - Default value is 1000. Valid range is 0 through 65535. + returned: success + type: int + tags: + description: + - A list of instance tags to which this route applies. + returned: success + type: list + nextHopGateway: + description: + - URL to a gateway that should handle matching packets. 
+ - 'Currently, you can only specify the internet gateway, using a full or partial + valid URL: * U(https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway) + * projects/project/global/gateways/default-internet-gateway * global/gateways/default-internet-gateway + .' + returned: success + type: str + nextHopInstance: + description: + - URL to an instance that should handle matching packets. + - 'You can specify this as a full or partial URL. For example: * U(https://www.googleapis.com/compute/v1/projects/project/zones/zone/) + instances/instance * projects/project/zones/zone/instances/instance * zones/zone/instances/instance + .' + returned: success + type: dict + nextHopIp: + description: + - Network IP address of an instance that should handle matching packets. + returned: success + type: str + nextHopVpnTunnel: + description: + - URL to a VpnTunnel that should handle matching packets. + returned: success + type: dict + nextHopNetwork: + description: + - URL to a Network that should handle matching packets. + returned: success + type: str + nextHopIlb: + description: + - The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should + handle matching packets. + - 'You can only specify the forwarding rule as a partial or full URL. For example, + the following are all valid URLs: U(https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule) + regions/region/forwardingRules/forwardingRule Note that this can only be used + when the destinationRange is a public (non-RFC 1918) IP CIDR range.' 
+ returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/routes".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_router.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_router.py new file mode 100644 index 000000000..77efcbc40 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_router.py @@ -0,0 +1,559 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_router +description: +- Represents a Router resource. 
+short_description: Creates a GCP Router +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must + be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + required: true + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + network: + description: + - A reference to the network to which this router belongs. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: true + type: dict + bgp: + description: + - BGP information specific to this router. + required: false + type: dict + suboptions: + asn: + description: + - Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, + either 16-bit or 32-bit. The value will be fixed for this router resource. + All VPN tunnels that link to this router will have the same local ASN. + required: true + type: int + advertise_mode: + description: + - User-specified flag to indicate which mode to use for advertisement. 
+ - 'Some valid choices include: "DEFAULT", "CUSTOM"' + required: false + default: DEFAULT + type: str + advertised_groups: + description: + - User-specified list of prefix groups to advertise in custom mode. + - This field can only be populated if advertiseMode is CUSTOM and is advertised + to all peers of the router. These groups will be advertised in addition + to any specified prefixes. Leave this field blank to advertise no custom + groups. + - 'This enum field has the one valid value: ALL_SUBNETS .' + elements: str + required: false + type: list + advertised_ip_ranges: + description: + - User-specified list of individual IP ranges to advertise in custom mode. + This field can only be populated if advertiseMode is CUSTOM and is advertised + to all peers of the router. These IP ranges will be advertised in addition + to any specified groups. + - Leave this field blank to advertise no custom IP ranges. + elements: dict + required: false + type: list + suboptions: + range: + description: + - The IP range to advertise. The value must be a CIDR-formatted string. + required: true + type: str + description: + description: + - User-specified description for the IP range. + required: false + type: str + region: + description: + - Region where the router resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/routers)' +- 'Google Cloud Router: U(https://cloud.google.com/router/docs/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-router + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a router + google.cloud.gcp_compute_router: + name: test_object + network: "{{ network }}" + bgp: + asn: 64514 + advertise_mode: CUSTOM + advertised_groups: + - ALL_SUBNETS + advertised_ip_ranges: + - range: 1.2.3.4 + - range: 6.7.0.0/16 + region: us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +id: + description: + - The unique identifier for the resource. + returned: success + type: int +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. + Specifically, the name must be 1-63 characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase + letter, and all following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +network: + description: + - A reference to the network to which this router belongs. + returned: success + type: dict +bgp: + description: + - BGP information specific to this router. + returned: success + type: complex + contains: + asn: + description: + - Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, + either 16-bit or 32-bit. The value will be fixed for this router resource. + All VPN tunnels that link to this router will have the same local ASN. 
+ returned: success + type: int + advertiseMode: + description: + - User-specified flag to indicate which mode to use for advertisement. + returned: success + type: str + advertisedGroups: + description: + - User-specified list of prefix groups to advertise in custom mode. + - This field can only be populated if advertiseMode is CUSTOM and is advertised + to all peers of the router. These groups will be advertised in addition to + any specified prefixes. Leave this field blank to advertise no custom groups. + - 'This enum field has the one valid value: ALL_SUBNETS .' + returned: success + type: list + advertisedIpRanges: + description: + - User-specified list of individual IP ranges to advertise in custom mode. This + field can only be populated if advertiseMode is CUSTOM and is advertised to + all peers of the router. These IP ranges will be advertised in addition to + any specified groups. + - Leave this field blank to advertise no custom IP ranges. + returned: success + type: complex + contains: + range: + description: + - The IP range to advertise. The value must be a CIDR-formatted string. + returned: success + type: str + description: + description: + - User-specified description for the IP range. + returned: success + type: str +region: + description: + - Region where the router resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + network=dict(required=True, type='dict'), + bgp=dict( + type='dict', + options=dict( + asn=dict(required=True, type='int'), + advertise_mode=dict(default='DEFAULT', type='str'), + advertised_groups=dict(type='list', elements='str'), + advertised_ip_ranges=dict(type='list', elements='dict', options=dict(range=dict(required=True, type='str'), description=dict(type='str'))), + ), + ), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#router' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def 
create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.patch(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#router', + u'region': module.params.get('region'), + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + u'bgp': RouterBgp(module.params.get('bgp', {}), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/routers/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/routers".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'id': response.get(u'id'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'name': module.params.get('name'), + u'description': response.get(u'description'), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + u'bgp': RouterBgp(response.get(u'bgp', {}), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#router') + + +def 
wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class RouterBgp(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'asn': self.request.get('asn'), + u'advertiseMode': self.request.get('advertise_mode'), + u'advertisedGroups': self.request.get('advertised_groups'), + u'advertisedIpRanges': RouterAdvertisediprangesArray(self.request.get('advertised_ip_ranges', []), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'asn': self.request.get(u'asn'), + u'advertiseMode': self.request.get(u'advertiseMode'), + u'advertisedGroups': self.request.get(u'advertisedGroups'), + u'advertisedIpRanges': RouterAdvertisediprangesArray(self.request.get(u'advertisedIpRanges', []), self.module).from_response(), + } + ) + + +class RouterAdvertisediprangesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'range': item.get('range'), u'description': item.get('description')}) + + def 
_response_from_item(self, item): + return remove_nones_from_dict({u'range': item.get(u'range'), u'description': item.get(u'description')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_router_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_router_info.py new file mode 100644 index 000000000..25e148dfa --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_router_info.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_router_info +description: +- Gather info for GCP Router +short_description: Gather info for GCP Router +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. 
Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+    elements: str
+  region:
+    description:
+    - Region where the router resides.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used
+    type: list
+    elements: str
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variables values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a router + gcp_compute_router_info: + region: us-central1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + id: + description: + - The unique identifier for the resource. + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + network: + description: + - A reference to the network to which this router belongs. + returned: success + type: dict + bgp: + description: + - BGP information specific to this router. + returned: success + type: complex + contains: + asn: + description: + - Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, + either 16-bit or 32-bit. The value will be fixed for this router resource. + All VPN tunnels that link to this router will have the same local ASN. + returned: success + type: int + advertiseMode: + description: + - User-specified flag to indicate which mode to use for advertisement. + returned: success + type: str + advertisedGroups: + description: + - User-specified list of prefix groups to advertise in custom mode. 
+          - This field can only be populated if advertiseMode is CUSTOM and is advertised
+            to all peers of the router. These groups will be advertised in addition
+            to any specified prefixes. Leave this field blank to advertise no custom
+            groups.
+          - 'This enum field has the one valid value: ALL_SUBNETS.'
+          returned: success
+          type: list
+        advertisedIpRanges:
+          description:
+          - User-specified list of individual IP ranges to advertise in custom mode.
+            This field can only be populated if advertiseMode is CUSTOM and is advertised
+            to all peers of the router. These IP ranges will be advertised in addition
+            to any specified groups.
+          - Leave this field blank to advertise no custom IP ranges.
+          returned: success
+          type: complex
+          contains:
+            range:
+              description:
+              - The IP range to advertise. The value must be a CIDR-formatted string.
+              returned: success
+              type: str
+            description:
+              description:
+              - User-specified description for the IP range.
+              returned: success
+              type: str
+    region:
+      description:
+      - Region where the router resides.
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/routers".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_snapshot.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_snapshot.py new file mode 100644 index 000000000..7f2a61695 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_snapshot.py @@ -0,0 +1,642 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_snapshot +description: +- Represents a Persistent Disk Snapshot resource. 
+- Use snapshots to back up data from your persistent disks. Snapshots are different + from public images and custom images, which are used primarily to create instances + or configure instance templates. Snapshots are useful for periodic backup of the + data on your persistent disks. You can create snapshots from persistent disks even + while they are attached to running instances. +- Snapshots are incremental, so you can create regular snapshots on a persistent disk + faster and at a much lower cost than if you regularly created a full image of the + disk. +short_description: Creates a GCP Snapshot +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + storage_locations: + description: + - Cloud Storage bucket storage location of the snapshot (regional or multi-regional). + elements: str + required: false + type: list + labels: + description: + - Labels to apply to this Snapshot. + required: false + type: dict + source_disk: + description: + - A reference to the disk used to create this snapshot. + - 'This field represents a link to a Disk resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_compute_disk task and then set this source_disk field to "{{ name-of-resource + }}"' + required: true + type: dict + zone: + description: + - A reference to the zone where the disk is hosted. + required: false + type: str + snapshot_encryption_key: + description: + - The customer-supplied encryption key of the snapshot. Required if the source + snapshot is protected by a customer-supplied encryption key. + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + required: false + type: str + kms_key_name: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + required: false + type: str + kms_key_service_account: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. + required: false + type: str + source_disk_encryption_key: + description: + - The customer-supplied encryption key of the source snapshot. Required if the + source snapshot is protected by a customer-supplied encryption key. + required: false + type: dict + suboptions: + raw_key: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + required: false + type: str + kms_key_name: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + required: false + type: str + kms_key_service_account: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. 
+ required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/snapshots)' +- 'Official Documentation: U(https://cloud.google.com/compute/docs/disks/create-snapshots)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a disk + google.cloud.gcp_compute_disk: + name: disk-snapshot + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: disk + +- name: create a snapshot + google.cloud.gcp_compute_snapshot: + name: test_object + source_disk: "{{ disk }}" + zone: us-central1-a + labels: + my_label: value + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +diskSizeGb: + description: + - Size of the snapshot, specified in GB. + returned: success + type: int +name: + description: + - Name of the resource; provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +storageBytes: + description: + - A size of the storage used by the snapshot. As snapshots share storage, this number + is expected to change with snapshot creation/deletion. + returned: success + type: int +storageLocations: + description: + - Cloud Storage bucket storage location of the snapshot (regional or multi-regional). + returned: success + type: list +licenses: + description: + - A list of public visible licenses that apply to this snapshot. 
This can be because + the original image had licenses attached (such as a Windows image). snapshotEncryptionKey + nested object Encrypts the snapshot using a customer-supplied encryption key. + returned: success + type: list +labels: + description: + - Labels to apply to this Snapshot. + returned: success + type: dict +labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str +sourceDisk: + description: + - A reference to the disk used to create this snapshot. + returned: success + type: dict +zone: + description: + - A reference to the zone where the disk is hosted. + returned: success + type: str +snapshotEncryptionKey: + description: + - The customer-supplied encryption key of the snapshot. Required if the source snapshot + is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str +sourceDiskEncryptionKey: + description: + - The customer-supplied encryption key of the source snapshot. Required if the source + snapshot is protected by a customer-supplied encryption key. 
+ returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + storage_locations=dict(type='list', elements='str'), + labels=dict(type='dict'), + source_disk=dict(required=True, type='dict'), + zone=dict(type='str'), + snapshot_encryption_key=dict( + type='dict', no_log=True, options=dict(raw_key=dict(type='str'), kms_key_name=dict(type='str'), kms_key_service_account=dict(type='str')) + ), + source_disk_encryption_key=dict( + type='dict', no_log=True, options=dict(raw_key=dict(type='str'), kms_key_name=dict(type='str'), kms_key_service_account=dict(type='str')) + ), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = 
['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#snapshot' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, create_link(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) + return fetch_resource(module, self_link(module), kind) + + +def update_fields(module, request, response): + if response.get('labels') != request.get('labels'): + labels_update(module, request, response) + + +def labels_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/snapshots/{name}/setLabels"]).format(**module.params), + {u'labels': module.params.get('labels'), u'labelFingerprint': response.get('labelFingerprint')}, + ) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#snapshot', + u'sourceDisk': replace_resource_dict(module.params.get(u'source_disk', {}), 'name'), + u'zone': module.params.get('zone'), + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'storageLocations': module.params.get('storage_locations'), + u'labels': 
module.params.get('labels'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/snapshots/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/snapshots".format(**module.params) + + +def create_link(module): + res = {'project': module.params['project'], 'zone': module.params['zone'], 'source_disk': replace_resource_dict(module.params['source_disk'], 'name')} + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{source_disk}/createSnapshot".format(**res) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'id': response.get(u'id'), + u'diskSizeGb': response.get(u'diskSizeGb'), + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'storageBytes': response.get(u'storageBytes'), + u'storageLocations': response.get(u'storageLocations'), + u'licenses': response.get(u'licenses'), + u'labels': response.get(u'labels'), + u'labelFingerprint': response.get(u'labelFingerprint'), + } + + +def license_selflink(name, params): + if name is None: + return + url = r"https://compute.googleapis.com/compute/v1//projects/.*/global/licenses/.*" + if not re.match(url, name): + name = "https://compute.googleapis.com/compute/v1//projects/{project}/global/licenses/%s".format(**params) % name + return name + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#snapshot') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = navigate_hash(op_result, ['selfLink']) + while status != 'DONE': + 
raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class SnapshotSnapshotencryptionkey(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get('raw_key'), + u'kmsKeyName': self.request.get('kms_key_name'), + u'kmsKeyServiceAccount': self.request.get('kms_key_service_account'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get(u'rawKey'), + u'kmsKeyName': self.request.get(u'kmsKeyName'), + u'kmsKeyServiceAccount': self.request.get(u'kmsKeyServiceAccount'), + } + ) + + +class SnapshotSourcediskencryptionkey(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get('raw_key'), + u'kmsKeyName': self.request.get('kms_key_name'), + u'kmsKeyServiceAccount': self.request.get('kms_key_service_account'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'rawKey': self.request.get(u'rawKey'), + u'kmsKeyName': self.request.get(u'kmsKeyName'), + u'kmsKeyServiceAccount': self.request.get(u'kmsKeyServiceAccount'), + } + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_snapshot_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_snapshot_info.py new file mode 100644 index 000000000..e1d98952f --- /dev/null +++ 
b/ansible_collections/google/cloud/plugins/modules/gcp_compute_snapshot_info.py @@ -0,0 +1,314 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_snapshot_info +description: +- Gather info for GCP Snapshot +short_description: Gather info for GCP Snapshot +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a snapshot + gcp_compute_snapshot_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + diskSizeGb: + description: + - Size of the snapshot, specified in GB. + returned: success + type: int + name: + description: + - Name of the resource; provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + storageBytes: + description: + - A size of the storage used by the snapshot. As snapshots share storage, this + number is expected to change with snapshot creation/deletion. + returned: success + type: int + storageLocations: + description: + - Cloud Storage bucket storage location of the snapshot (regional or multi-regional). + returned: success + type: list + licenses: + description: + - A list of public visible licenses that apply to this snapshot. This can be + because the original image had licenses attached (such as a Windows image). + snapshotEncryptionKey nested object Encrypts the snapshot using a customer-supplied + encryption key. 
+ returned: success + type: list + labels: + description: + - Labels to apply to this Snapshot. + returned: success + type: dict + labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str + sourceDisk: + description: + - A reference to the disk used to create this snapshot. + returned: success + type: dict + zone: + description: + - A reference to the zone where the disk is hosted. + returned: success + type: str + snapshotEncryptionKey: + description: + - The customer-supplied encryption key of the snapshot. Required if the source + snapshot is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS + key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str + sourceDiskEncryptionKey: + description: + - The customer-supplied encryption key of the source snapshot. Required if the + source snapshot is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. 
+ returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + kmsKeyServiceAccount: + description: + - The service account used for the encryption request for the given KMS + key. + - If absent, the Compute Engine Service Agent service account is used. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/snapshots".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. 
+ if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_certificate.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_certificate.py new file mode 100644 index 000000000..15ddabafc --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_certificate.py @@ -0,0 +1,396 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_ssl_certificate +description: +- An SslCertificate resource, used for HTTPS load balancing. This resource provides + a mechanism to upload an SSL key and certificate to the load balancer to serve secure + connections from the user. +short_description: Creates a GCP SslCertificate +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + certificate: + description: + - The certificate in PEM format. + - The certificate chain must be no greater than 5 certs long. + - The chain must include at least one intermediate cert. + required: true + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. 
+ required: false + type: str + private_key: + description: + - The write-only private key in PEM format. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates)' +- 'Official Documentation: U(https://cloud.google.com/load-balancing/docs/ssl-certificates)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. 
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a SSL certificate + google.cloud.gcp_compute_ssl_certificate: + name: test_object + description: A certificate for testing. Do not use this certificate in production + certificate: |- + -----BEGIN CERTIFICATE----- + MIICqjCCAk+gAwIBAgIJAIuJ+0352Kq4MAoGCCqGSM49BAMCMIGwMQswCQYDVQQG + EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjERMA8GA1UEBwwIS2lya2xhbmQxFTAT + BgNVBAoMDEdvb2dsZSwgSW5jLjEeMBwGA1UECwwVR29vZ2xlIENsb3VkIFBsYXRm + b3JtMR8wHQYDVQQDDBZ3d3cubXktc2VjdXJlLXNpdGUuY29tMSEwHwYJKoZIhvcN + AQkBFhJuZWxzb25hQGdvb2dsZS5jb20wHhcNMTcwNjI4MDQ1NjI2WhcNMjcwNjI2 + MDQ1NjI2WjCBsDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xETAP + BgNVBAcMCEtpcmtsYW5kMRUwEwYDVQQKDAxHb29nbGUsIEluYy4xHjAcBgNVBAsM + FUdvb2dsZSBDbG91ZCBQbGF0Zm9ybTEfMB0GA1UEAwwWd3d3Lm15LXNlY3VyZS1z + aXRlLmNvbTEhMB8GCSqGSIb3DQEJARYSbmVsc29uYUBnb29nbGUuY29tMFkwEwYH + KoZIzj0CAQYIKoZIzj0DAQcDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ + 4mzkzTv0dXyB750fOGN02HtkpBOZzzvUARTR10JQoSe2/5PIwaNQME4wHQYDVR0O + BBYEFKIQC3A2SDpxcdfn0YLKineDNq/BMB8GA1UdIwQYMBaAFKIQC3A2SDpxcdfn + 0YLKineDNq/BMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSQAwRgIhALs4vy+O + M3jcqgA4fSW/oKw6UJxp+M6a+nGMX+UJR3YgAiEAvvl39QRVAiv84hdoCuyON0lJ + zqGNhIPGq2ULqXKK8BY= + -----END CERTIFICATE----- + private_key: |- + -----BEGIN EC PRIVATE KEY----- + MHcCAQEEIObtRo8tkUqoMjeHhsOh2ouPpXCgBcP+EDxZCB/tws15oAoGCCqGSM49 + AwEHoUQDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ4mzkzTv0dXyB750f + OGN02HtkpBOZzzvUARTR10JQoSe2/5PIwQ== + -----END EC PRIVATE KEY----- + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +certificate: + description: + - The certificate in PEM format. + - The certificate chain must be no greater than 5 certs long. + - The chain must include at least one intermediate cert. 
+ returned: success + type: str +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +privateKey: + description: + - The write-only private key in PEM format. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + certificate=dict(required=True, type='str'), + description=dict(type='str'), + name=dict(type='str'), + private_key=dict(required=True, type='str', no_log=True), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 
'compute#sslCertificate' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), kind) + create(module, collection(module), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#sslCertificate', + u'certificate': module.params.get('certificate'), + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'privateKey': module.params.get('private_key'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/sslCertificates/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/sslCertificates".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. 
+ if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
def response_to_hash(module, response):
    """Strip an sslCertificates GET response down to the comparable fields.

    ``privateKey`` is write-only in the API, so the playbook value is used
    in its place.
    """
    return {
        u'certificate': response.get(u'certificate'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'description': response.get(u'description'),
        u'id': response.get(u'id'),
        u'name': response.get(u'name'),
        u'privateKey': module.params.get('private_key'),
    }


def async_op_url(module, extra_data=None):
    """URL of the global operation used to poll async create/delete."""
    if extra_data is None:
        extra_data = {}
    template = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    values = extra_data.copy()
    values.update(module.params)
    return template.format(**values)


def wait_for_operation(module, response):
    """Block until the operation finishes, then fetch the resulting certificate."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#sslCertificate')


def wait_for_completion(status, op_result, module):
    """Poll the named global operation once per second until DONE."""
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail fast if the polled operation reports errors."""
    found = navigate_hash(response, err_path)
    if found is not None:
        module.fail_json(msg=found)


if __name__ == '__main__':
    main()
# diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_certificate_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_certificate_info.py
# new file mode 100644
# index 000000000..e030ce834
# --- /dev/null
# +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_certificate_info.py
# @@ -0,0 +1,225 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
#
GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_ssl_certificate_info +description: +- Gather info for GCP SslCertificate +short_description: Gather info for GCP SslCertificate +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a SSL certificate + gcp_compute_ssl_certificate_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + certificate: + description: + - The certificate in PEM format. + - The certificate chain must be no greater than 5 certs long. + - The chain must include at least one intermediate cert. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + privateKey: + description: + - The write-only private key in PEM format. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/sslCertificates".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_policy.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_policy.py new file mode 100644 index 000000000..57cda0d2f --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_policy.py @@ -0,0 +1,462 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_ssl_policy +description: +- Represents a SSL policy. 
SSL policies give you the ability to control the features + of SSL that your SSL proxy or HTTPS load balancer negotiates. +short_description: Creates a GCP SslPolicy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + profile: + description: + - Profile specifies the set of SSL features that can be used by the load balancer + when negotiating SSL with clients. If using `CUSTOM`, the set of SSL features + to enable must be specified in the `customFeatures` field. + - 'Some valid choices include: "COMPATIBLE", "MODERN", "RESTRICTED", "CUSTOM"' + required: false + type: str + min_tls_version: + description: + - The minimum version of SSL protocol that can be used by the clients to establish + a connection with the load balancer. + - 'Some valid choices include: "TLS_1_0", "TLS_1_1", "TLS_1_2"' + required: false + type: str + custom_features: + description: + - A list of features enabled when the selected profile is CUSTOM. The method returns + the set of features that can be specified in this list. This field must be empty + if the profile is not CUSTOM. 
+ elements: str + required: false + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/sslPolicies)' +- 'Using SSL Policies: U(https://cloud.google.com/compute/docs/load-balancing/ssl-policies)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a SSL policy + google.cloud.gcp_compute_ssl_policy: + name: test_object + profile: CUSTOM + min_tls_version: TLS_1_2 + custom_features: + - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +profile: + description: + - Profile specifies the set of SSL features that can be used by the load balancer + when negotiating SSL with clients. If using `CUSTOM`, the set of SSL features + to enable must be specified in the `customFeatures` field. + returned: success + type: str +minTlsVersion: + description: + - The minimum version of SSL protocol that can be used by the clients to establish + a connection with the load balancer. + returned: success + type: str +enabledFeatures: + description: + - The list of features enabled in the SSL policy. + returned: success + type: list +customFeatures: + description: + - A list of features enabled when the selected profile is CUSTOM. The method returns + the set of features that can be specified in this list. 
This field must be empty + if the profile is not CUSTOM. + returned: success + type: list +fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. This + field is used in optimistic locking. + returned: success + type: str +warnings: + description: + - If potential misconfigurations are detected for this SSL policy, this field will + be populated with warning messages. + returned: success + type: complex + contains: + code: + description: + - A warning code, if applicable. + returned: success + type: str + message: + description: + - A human-readable description of the warning code. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + profile=dict(type='str'), + min_tls_version=dict(type='str'), + custom_features=dict(type='list', elements='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#sslPolicy' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = 
fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.patch(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#sslPolicy', + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'profile': module.params.get('profile'), + u'minTlsVersion': module.params.get('min_tls_version'), + u'customFeatures': module.params.get('custom_features'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/sslPolicies/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/sslPolicies".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': module.params.get('description'), + u'id': response.get(u'id'), + u'name': module.params.get('name'), + u'profile': response.get(u'profile'), + u'minTlsVersion': response.get(u'minTlsVersion'), + u'enabledFeatures': response.get(u'enabledFeatures'), + u'customFeatures': response.get(u'customFeatures'), + u'fingerprint': response.get(u'fingerprint'), + u'warnings': SslPolicyWarningsArray(response.get(u'warnings', []), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = 
navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#sslPolicy') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class SslPolicyWarningsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({}) + + def _response_from_item(self, item): + return remove_nones_from_dict({}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_policy_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_policy_info.py new file mode 100644 index 000000000..a194ebec7 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_ssl_policy_info.py @@ -0,0 +1,261 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: 
MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_ssl_policy_info
+description:
+- Gather info for GCP SslPolicy
+short_description: Gather info for GCP SslPolicy
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+    elements: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a SSL policy + gcp_compute_ssl_policy_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. 
+ The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + profile: + description: + - Profile specifies the set of SSL features that can be used by the load balancer + when negotiating SSL with clients. If using `CUSTOM`, the set of SSL features + to enable must be specified in the `customFeatures` field. + returned: success + type: str + minTlsVersion: + description: + - The minimum version of SSL protocol that can be used by the clients to establish + a connection with the load balancer. + returned: success + type: str + enabledFeatures: + description: + - The list of features enabled in the SSL policy. + returned: success + type: list + customFeatures: + description: + - A list of features enabled when the selected profile is CUSTOM. The method + returns the set of features that can be specified in this list. This field + must be empty if the profile is not CUSTOM. + returned: success + type: list + fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. + This field is used in optimistic locking. + returned: success + type: str + warnings: + description: + - If potential misconfigurations are detected for this SSL policy, this field + will be populated with warning messages. + returned: success + type: complex + contains: + code: + description: + - A warning code, if applicable. + returned: success + type: str + message: + description: + - A human-readable description of the warning code. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/sslPolicies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_subnetwork.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_subnetwork.py new file mode 100644 index 000000000..3fc743802 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_subnetwork.py @@ -0,0 +1,586 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_subnetwork +description: +- A VPC network is a virtual version of the traditional physical networks that exist + within and between physical data centers. A VPC network provides connectivity for + your Compute Engine virtual machine (VM) instances, Container Engine containers, + App Engine Flex services, and other network-related resources. +- Each GCP project contains one or more VPC networks. Each VPC network is a global + entity spanning all GCP regions. This global VPC network allows VM instances and + other resources to communicate with each other via internal, private IP addresses. +- Each VPC network is subdivided into subnets, and each subnet is contained within + a single region. You can have more than one subnet in a region for a given VPC network. + Each subnet has a contiguous private RFC1918 IP space. You create instances, containers, + and the like in these subnets. +- When you create an instance, you must create it in a subnet, and the instance draws + its internal IP address from that subnet. +- Virtual machine (VM) instances in a VPC network can communicate with instances in + all other subnets of the same VPC network, regardless of region, using their RFC1918 + private IP addresses. You can isolate portions of the network, even entire subnets, + using firewall rules. +short_description: Creates a GCP Subnetwork +author: Google Inc. 
(@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. This field can be set only at resource creation time. + required: false + type: str + ip_cidr_range: + description: + - The range of internal addresses that are owned by this subnetwork. + - Provide this property when you create the subnetwork. For example, 10.0.0.0/8 + or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. + Only IPv4 is supported. + required: true + type: str + name: + description: + - The name of the resource, provided by the client when initially creating the + resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + network: + description: + - The network this subnet belongs to. + - Only networks that are in the distributed mode can have subnetworks. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: true + type: dict + secondary_ip_ranges: + description: + - An array of configurations for secondary IP ranges for VM instances contained + in this subnetwork. 
The primary IP of such VM must belong to the primary ipCidrRange + of the subnetwork. The alias IPs may belong to either primary or secondary ranges. + elements: dict + required: false + type: list + suboptions: + range_name: + description: + - The name associated with this subnetwork secondary range, used when adding + an alias IP range to a VM instance. The name must be 1-63 characters long, + and comply with RFC1035. The name must be unique within the subnetwork. + required: true + type: str + ip_cidr_range: + description: + - The range of IP addresses belonging to this subnetwork secondary range. + Provide this property when you create the subnetwork. + - Ranges must be unique and non-overlapping with all primary and secondary + IP ranges within a network. Only IPv4 is supported. + required: true + type: str + private_ip_google_access: + description: + - When enabled, VMs in this subnetwork without external IP addresses can access + Google APIs and services by using Private Google Access. + required: false + type: bool + private_ipv6_google_access: + description: + - The private IPv6 google access type for the VMs in this subnet. + required: false + type: str + region: + description: + - The GCP region for this subnetwork. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks)' +- 'Private Google Access: U(https://cloud.google.com/vpc/docs/configure-private-google-access)' +- 'Cloud Networking: U(https://cloud.google.com/vpc/docs/using-vpc)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-subnetwork + auto_create_subnetworks: 'true' + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a subnetwork + google.cloud.gcp_compute_subnetwork: + name: ansiblenet + region: us-west1 + network: "{{ network }}" + ip_cidr_range: 172.16.0.0/16 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. 
+ returned: success + type: str +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. This field can be set only at resource creation time. + returned: success + type: str +gatewayAddress: + description: + - The gateway address for default routes to reach destination addresses outside + this subnetwork. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +ipCidrRange: + description: + - The range of internal addresses that are owned by this subnetwork. + - Provide this property when you create the subnetwork. For example, 10.0.0.0/8 + or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. + Only IPv4 is supported. + returned: success + type: str +name: + description: + - The name of the resource, provided by the client when initially creating the resource. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +network: + description: + - The network this subnet belongs to. + - Only networks that are in the distributed mode can have subnetworks. + returned: success + type: dict +secondaryIpRanges: + description: + - An array of configurations for secondary IP ranges for VM instances contained + in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange + of the subnetwork. The alias IPs may belong to either primary or secondary ranges. + returned: success + type: complex + contains: + rangeName: + description: + - The name associated with this subnetwork secondary range, used when adding + an alias IP range to a VM instance. 
The name must be 1-63 characters long, + and comply with RFC1035. The name must be unique within the subnetwork. + returned: success + type: str + ipCidrRange: + description: + - The range of IP addresses belonging to this subnetwork secondary range. Provide + this property when you create the subnetwork. + - Ranges must be unique and non-overlapping with all primary and secondary IP + ranges within a network. Only IPv4 is supported. + returned: success + type: str +privateIpGoogleAccess: + description: + - When enabled, VMs in this subnetwork without external IP addresses can access + Google APIs and services by using Private Google Access. + returned: success + type: bool +privateIpv6GoogleAccess: + description: + - The private IPv6 google access type for the VMs in this subnet. + returned: success + type: str +region: + description: + - The GCP region for this subnetwork. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + ip_cidr_range=dict(required=True, type='str'), + name=dict(required=True, type='str'), + network=dict(required=True, type='dict'), + secondary_ip_ranges=dict( + type='list', elements='dict', options=dict(range_name=dict(required=True, type='str'), ip_cidr_range=dict(required=True, type='str')) + ), + 
private_ip_google_access=dict(type='bool'), + private_ipv6_google_access=dict(type='str'), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#subnetwork' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) + return fetch_resource(module, self_link(module), kind) + + +def update_fields(module, request, response): + if response.get('ipCidrRange') != request.get('ipCidrRange'): + ip_cidr_range_update(module, request, response) + if response.get('secondaryIpRanges') != request.get('secondaryIpRanges') or response.get('privateIpv6GoogleAccess') != request.get( + 'privateIpv6GoogleAccess' + ): + secondary_ip_ranges_update(module, request, response) + if response.get('privateIpGoogleAccess') != request.get('privateIpGoogleAccess'): + private_ip_google_access_update(module, request, response) + + +def ip_cidr_range_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/subnetworks/{name}/expandIpCidrRange"]).format( + 
**module.params + ), + {u'ipCidrRange': module.params.get('ip_cidr_range')}, + ) + + +def secondary_ip_ranges_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.patch( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/subnetworks/{name}"]).format(**module.params), + { + u'secondaryIpRanges': SubnetworkSecondaryiprangesArray(module.params.get('secondary_ip_ranges', []), module).to_request(), + u'privateIpv6GoogleAccess': module.params.get('private_ipv6_google_access'), + }, + ) + + +def private_ip_google_access_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/subnetworks/{name}/setPrivateIpGoogleAccess"]).format( + **module.params + ), + {u'privateIpGoogleAccess': module.params.get('private_ip_google_access')}, + ) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#subnetwork', + u'description': module.params.get('description'), + u'ipCidrRange': module.params.get('ip_cidr_range'), + u'name': module.params.get('name'), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + u'secondaryIpRanges': SubnetworkSecondaryiprangesArray(module.params.get('secondary_ip_ranges', []), module).to_request(), + u'privateIpGoogleAccess': module.params.get('private_ip_google_access'), + u'privateIpv6GoogleAccess': module.params.get('private_ipv6_google_access'), + u'region': module.params.get('region'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + 
def self_link(module):
    """Return the canonical URL of this specific subnetwork resource."""
    template = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{name}"
    return template.format(**module.params)


def collection(module):
    """Return the URL of the regional subnetworks collection."""
    template = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks"
    return template.format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response into a dict.

    Returns None for an empty (204) reply, or for a 404 when the caller
    tolerates a missing resource. Fails the module on HTTP errors, invalid
    JSON, or an error object embedded in the payload.
    """
    status = response.status_code
    if status == 404 and allow_not_found:
        return None
    if status == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    # A 2xx reply can still carry an error object in its body.
    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return result


def is_different(module, response):
    """Return True when the desired state differs from the live resource."""
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Compare only keys both sides share; everything else is output-only
    # data the user cannot control.
    shared_response = {k: v for k, v in response.items() if k in request}
    shared_request = {k: v for k, v in request.items() if k in response}

    return GcpRequest(shared_request) != GcpRequest(shared_response)


# Strip output-only properties from the response so it can be compared
# against the module's current parameters.
def response_to_hash(module, response):
    """Map an API response onto the same keys resource_to_request() emits.

    Output-only fields are passed through from the response; 'network' and
    'region' are taken from the module params rather than the response.
    """
    return {
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'description': response.get(u'description'),
        u'gatewayAddress': response.get(u'gatewayAddress'),
        u'id': response.get(u'id'),
        u'ipCidrRange': response.get(u'ipCidrRange'),
        u'name': response.get(u'name'),
        u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
        u'secondaryIpRanges': SubnetworkSecondaryiprangesArray(response.get(u'secondaryIpRanges', []), module).from_response(),
        u'privateIpGoogleAccess': response.get(u'privateIpGoogleAccess'),
        u'privateIpv6GoogleAccess': response.get(u'privateIpv6GoogleAccess'),
        u'region': module.params.get('region'),
    }


def async_op_url(module, extra_data=None):
    """Build the regional operations URL for polling an async operation.

    extra_data supplies keys not present in module params (e.g. 'op_id');
    module params take precedence on key collisions.
    """
    if extra_data is None:
        extra_data = {}
    url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    """Wait for an async operation, then fetch and return the subnetwork.

    Returns {} when the operation response is empty (e.g. 204/404).
    """
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#subnetwork')


def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until its status is DONE.

    Fails the module as soon as the operation payload reports errors.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module if the payload carries errors at err_path."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


# Converts secondary IP range entries between the module's snake_case
# representation and the API's camelCase representation.
class SubnetworkSecondaryiprangesArray(object):
    def __init__(self,
request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'rangeName': item.get('range_name'), u'ipCidrRange': item.get('ip_cidr_range')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'rangeName': item.get(u'rangeName'), u'ipCidrRange': item.get(u'ipCidrRange')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_subnetwork_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_subnetwork_info.py new file mode 100644 index 000000000..428e35dc5 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_subnetwork_info.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_subnetwork_info +description: +- Gather info for GCP Subnetwork +short_description: Gather info for GCP Subnetwork +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2). + type: list + elements: str + region: + description: + - The GCP region for this subnetwork. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a subnetwork + gcp_compute_subnetwork_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. This field can be set only at resource creation time. + returned: success + type: str + gatewayAddress: + description: + - The gateway address for default routes to reach destination addresses outside + this subnetwork. + returned: success + type: str + id: + description: + - The unique identifier for the resource. 
+ returned: success + type: int + ipCidrRange: + description: + - The range of internal addresses that are owned by this subnetwork. + - Provide this property when you create the subnetwork. For example, 10.0.0.0/8 + or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. + Only IPv4 is supported. + returned: success + type: str + name: + description: + - The name of the resource, provided by the client when initially creating the + resource. The name must be 1-63 characters long, and comply with RFC1035. + Specifically, the name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must + be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + network: + description: + - The network this subnet belongs to. + - Only networks that are in the distributed mode can have subnetworks. + returned: success + type: dict + secondaryIpRanges: + description: + - An array of configurations for secondary IP ranges for VM instances contained + in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange + of the subnetwork. The alias IPs may belong to either primary or secondary + ranges. + returned: success + type: complex + contains: + rangeName: + description: + - The name associated with this subnetwork secondary range, used when adding + an alias IP range to a VM instance. The name must be 1-63 characters long, + and comply with RFC1035. The name must be unique within the subnetwork. + returned: success + type: str + ipCidrRange: + description: + - The range of IP addresses belonging to this subnetwork secondary range. + Provide this property when you create the subnetwork. + - Ranges must be unique and non-overlapping with all primary and secondary + IP ranges within a network. Only IPv4 is supported. 
+ returned: success + type: str + privateIpGoogleAccess: + description: + - When enabled, VMs in this subnetwork without external IP addresses can access + Google APIs and services by using Private Google Access. + returned: success + type: bool + privateIpv6GoogleAccess: + description: + - The private IPv6 google access type for the VMs in this subnet. + returned: success + type: str + region: + description: + - The GCP region for this subnetwork. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + 
queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_http_proxy.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_http_proxy.py new file mode 100644 index 000000000..647a9c4ff --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_http_proxy.py @@ -0,0 +1,435 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_http_proxy +description: +- Represents a TargetHttpProxy resource, which is used by one or more global forwarding + rule to route incoming HTTP requests to a URL map. +short_description: Creates a GCP TargetHttpProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + url_map: + description: + - A reference to the UrlMap resource that defines the mapping from URL to the + BackendService. + - 'This field represents a link to a UrlMap resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_url_map task and then set this url_map field to "{{ name-of-resource + }}"' + required: true + type: dict + proxy_bind: + description: + - This field only applies when the forwarding rule that references this target + proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. + required: false + type: bool + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/targetHttpProxies)' +- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. 
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: instancegroup-targethttpproxy + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancegroup + +- name: create a HTTP health check + google.cloud.gcp_compute_http_health_check: + name: httphealthcheck-targethttpproxy + healthy_threshold: 10 + port: 8080 + timeout_sec: 2 + unhealthy_threshold: 5 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a backend service + google.cloud.gcp_compute_backend_service: + name: backendservice-targethttpproxy + backends: + - group: "{{ instancegroup.selfLink }}" + health_checks: + - "{{ healthcheck.selfLink }}" + enable_cdn: 'true' + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: backendservice + +- name: create a URL map + google.cloud.gcp_compute_url_map: + name: urlmap-targethttpproxy + default_service: "{{ backendservice }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: urlmap + +- name: create a target HTTP proxy + google.cloud.gcp_compute_target_http_proxy: + name: test_object + url_map: "{{ urlmap }}" + project: test_project + auth_kind: serviceaccount + 
service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +urlMap: + description: + - A reference to the UrlMap resource that defines the mapping from URL to the BackendService. + returned: success + type: dict +proxyBind: + description: + - This field only applies when the forwarding rule that references this target proxy + has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
+ returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + url_map=dict(required=True, type='dict'), + proxy_bind=dict(type='bool'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#targetHttpProxy' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) + return fetch_resource(module, self_link(module), kind) + + +def update_fields(module, request, 
response): + if response.get('urlMap') != request.get('urlMap'): + url_map_update(module, request, response) + + +def url_map_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/targetHttpProxies/{name}/setUrlMap"]).format(**module.params), + {u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink')}, + ) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#targetHttpProxy', + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink'), + u'proxyBind': module.params.get('proxy_bind'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetHttpProxies/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetHttpProxies".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'id': response.get(u'id'), + u'name': response.get(u'name'), + u'urlMap': response.get(u'urlMap'), + u'proxyBind': response.get(u'proxyBind'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetHttpProxy') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, 
['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_http_proxy_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_http_proxy_info.py new file mode 100644 index 000000000..42fbfceba --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_http_proxy_info.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_http_proxy_info +description: +- Gather info for GCP TargetHttpProxy +short_description: Gather info for GCP TargetHttpProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a target HTTP proxy + gcp_compute_target_http_proxy_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + urlMap: + description: + - A reference to the UrlMap resource that defines the mapping from URL to the + BackendService. + returned: success + type: dict + proxyBind: + description: + - This field only applies when the forwarding rule that references this target + proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. + returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetHttpProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries 
= [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_https_proxy.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_https_proxy.py new file mode 100644 index 000000000..fa15ab92a --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_https_proxy.py @@ -0,0 +1,559 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_https_proxy +description: +- Represents a TargetHttpsProxy resource, which is used by one or more global forwarding + rule to route incoming HTTPS requests to a URL map. +short_description: Creates a GCP TargetHttpsProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + quic_override: + description: + - Specifies the QUIC override policy for this resource. This determines whether + the load balancer will attempt to negotiate QUIC with clients or not. Can specify + one of NONE, ENABLE, or DISABLE. 
If NONE is specified, uses the QUIC policy + with no user overrides, which is equivalent to DISABLE. + - 'Some valid choices include: "NONE", "ENABLE", "DISABLE"' + required: false + type: str + ssl_certificates: + description: + - A list of SslCertificate resources that are used to authenticate connections + between users and the load balancer. At least one SSL certificate must be specified. + elements: dict + required: true + type: list + ssl_policy: + description: + - A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy + resource. If not set, the TargetHttpsProxy resource will not have any SSL policy + configured. + - 'This field represents a link to a SslPolicy resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_ssl_policy task and then set this ssl_policy field to "{{ name-of-resource + }}"' + required: false + type: dict + url_map: + description: + - A reference to the UrlMap resource that defines the mapping from URL to the + BackendService. + - 'This field represents a link to a UrlMap resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_url_map task and then set this url_map field to "{{ name-of-resource + }}"' + required: true + type: dict + proxy_bind: + description: + - This field only applies when the forwarding rule that references this target + proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. + required: false + type: bool + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/targetHttpsProxies)' +- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: instancegroup-targethttpsproxy + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancegroup + +- name: create a HTTP health check + google.cloud.gcp_compute_http_health_check: + name: httphealthcheck-targethttpsproxy + healthy_threshold: 10 + port: 8080 + timeout_sec: 2 + unhealthy_threshold: 5 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a backend service + google.cloud.gcp_compute_backend_service: + name: backendservice-targethttpsproxy + backends: + - group: "{{ instancegroup.selfLink }}" + health_checks: + - "{{ healthcheck.selfLink }}" + enable_cdn: 'true' + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: backendservice + +- name: create a URL map + google.cloud.gcp_compute_url_map: + name: urlmap-targethttpsproxy + default_service: "{{ backendservice }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: urlmap + +- name: create a SSL certificate + google.cloud.gcp_compute_ssl_certificate: + name: sslcert-targethttpsproxy + description: A certificate for testing. 
Do not use this certificate in production + certificate: |- + -----BEGIN CERTIFICATE----- + MIICqjCCAk+gAwIBAgIJAIuJ+0352Kq4MAoGCCqGSM49BAMCMIGwMQswCQYDVQQG + EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjERMA8GA1UEBwwIS2lya2xhbmQxFTAT + BgNVBAoMDEdvb2dsZSwgSW5jLjEeMBwGA1UECwwVR29vZ2xlIENsb3VkIFBsYXRm + b3JtMR8wHQYDVQQDDBZ3d3cubXktc2VjdXJlLXNpdGUuY29tMSEwHwYJKoZIhvcN + AQkBFhJuZWxzb25hQGdvb2dsZS5jb20wHhcNMTcwNjI4MDQ1NjI2WhcNMjcwNjI2 + MDQ1NjI2WjCBsDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xETAP + BgNVBAcMCEtpcmtsYW5kMRUwEwYDVQQKDAxHb29nbGUsIEluYy4xHjAcBgNVBAsM + FUdvb2dsZSBDbG91ZCBQbGF0Zm9ybTEfMB0GA1UEAwwWd3d3Lm15LXNlY3VyZS1z + aXRlLmNvbTEhMB8GCSqGSIb3DQEJARYSbmVsc29uYUBnb29nbGUuY29tMFkwEwYH + KoZIzj0CAQYIKoZIzj0DAQcDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ + 4mzkzTv0dXyB750fOGN02HtkpBOZzzvUARTR10JQoSe2/5PIwaNQME4wHQYDVR0O + BBYEFKIQC3A2SDpxcdfn0YLKineDNq/BMB8GA1UdIwQYMBaAFKIQC3A2SDpxcdfn + 0YLKineDNq/BMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSQAwRgIhALs4vy+O + M3jcqgA4fSW/oKw6UJxp+M6a+nGMX+UJR3YgAiEAvvl39QRVAiv84hdoCuyON0lJ + zqGNhIPGq2ULqXKK8BY= + -----END CERTIFICATE----- + private_key: |- + -----BEGIN EC PRIVATE KEY----- + MHcCAQEEIObtRo8tkUqoMjeHhsOh2ouPpXCgBcP+EDxZCB/tws15oAoGCCqGSM49 + AwEHoUQDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ4mzkzTv0dXyB750f + OGN02HtkpBOZzzvUARTR10JQoSe2/5PIwQ== + -----END EC PRIVATE KEY----- + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: sslcert + +- name: create a target HTTPS proxy + google.cloud.gcp_compute_target_https_proxy: + name: test_object + ssl_certificates: + - "{{ sslcert }}" + url_map: "{{ urlmap }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. 
+ returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +quicOverride: + description: + - Specifies the QUIC override policy for this resource. This determines whether + the load balancer will attempt to negotiate QUIC with clients or not. Can specify + one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC policy with + no user overrides, which is equivalent to DISABLE. + returned: success + type: str +sslCertificates: + description: + - A list of SslCertificate resources that are used to authenticate connections between + users and the load balancer. At least one SSL certificate must be specified. + returned: success + type: list +sslPolicy: + description: + - A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy + resource. If not set, the TargetHttpsProxy resource will not have any SSL policy + configured. + returned: success + type: dict +urlMap: + description: + - A reference to the UrlMap resource that defines the mapping from URL to the BackendService. + returned: success + type: dict +proxyBind: + description: + - This field only applies when the forwarding rule that references this target proxy + has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
+ returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + quic_override=dict(type='str'), + ssl_certificates=dict(required=True, type='list', elements='dict'), + ssl_policy=dict(type='dict'), + url_map=dict(required=True, type='dict'), + proxy_bind=dict(type='bool'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#targetHttpsProxy' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + update_fields(module, resource_to_request(module), 
response_to_hash(module, fetch)) + return fetch_resource(module, self_link(module), kind) + + +def update_fields(module, request, response): + if response.get('quicOverride') != request.get('quicOverride'): + quic_override_update(module, request, response) + if response.get('sslCertificates') != request.get('sslCertificates'): + ssl_certificates_update(module, request, response) + if response.get('sslPolicy') != request.get('sslPolicy'): + ssl_policy_update(module, request, response) + if response.get('urlMap') != request.get('urlMap'): + url_map_update(module, request, response) + + +def quic_override_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/targetHttpsProxies/{name}/setQuicOverride"]).format(**module.params), + {u'quicOverride': module.params.get('quic_override')}, + ) + + +def ssl_certificates_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/targetHttpsProxies/{name}/setSslCertificates"]).format(**module.params), + {u'sslCertificates': replace_resource_dict(module.params.get('ssl_certificates', []), 'selfLink')}, + ) + + +def ssl_policy_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/targetHttpsProxies/{name}/setSslPolicy"]).format(**module.params), + {u'sslPolicy': replace_resource_dict(module.params.get(u'ssl_policy', {}), 'selfLink')}, + ) + + +def url_map_update(module, request, response): + auth = GcpSession(module, 'compute') + auth.post( + ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/targetHttpsProxies/{name}/setUrlMap"]).format(**module.params), + {u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink')}, + ) + + +def delete(module, link, 
kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#targetHttpsProxy', + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'quicOverride': module.params.get('quic_override'), + u'sslCertificates': replace_resource_dict(module.params.get('ssl_certificates', []), 'selfLink'), + u'sslPolicy': replace_resource_dict(module.params.get(u'ssl_policy', {}), 'selfLink'), + u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink'), + u'proxyBind': module.params.get('proxy_bind'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetHttpsProxies/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetHttpsProxies".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': module.params.get('description'), + u'id': response.get(u'id'), + u'name': module.params.get('name'), + u'quicOverride': response.get(u'quicOverride'), + u'sslCertificates': response.get(u'sslCertificates'), + u'sslPolicy': response.get(u'sslPolicy'), + u'urlMap': response.get(u'urlMap'), + u'proxyBind': response.get(u'proxyBind'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, 
navigate_hash(wait_done, ['targetLink']), 'compute#targetHttpsProxy') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_https_proxy_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_https_proxy_info.py new file mode 100644 index 000000000..197237c11 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_https_proxy_info.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a target HTTPS proxy + gcp_compute_target_https_proxy_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + quicOverride: + description: + - Specifies the QUIC override policy for this resource. This determines whether + the load balancer will attempt to negotiate QUIC with clients or not. Can + specify one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC + policy with no user overrides, which is equivalent to DISABLE. + returned: success + type: str + sslCertificates: + description: + - A list of SslCertificate resources that are used to authenticate connections + between users and the load balancer. At least one SSL certificate must be + specified. + returned: success + type: list + sslPolicy: + description: + - A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy + resource. If not set, the TargetHttpsProxy resource will not have any SSL + policy configured. + returned: success + type: dict + urlMap: + description: + - A reference to the UrlMap resource that defines the mapping from URL to the + BackendService. + returned: success + type: dict + proxyBind: + description: + - This field only applies when the forwarding rule that references this target + proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
+ returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetHttpsProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_instance.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_instance.py new file mode 100644 index 000000000..e3fd58e68 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_instance.py @@ -0,0 +1,419 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_instance +description: +- Represents a TargetInstance resource which defines an endpoint instance that terminates + traffic of certain protocols. In particular, they are used in Protocol Forwarding, + where forwarding rules can send packets to a non-NAT'ed target instance. Each target + instance contains a single virtual machine instance that receives and handles traffic + from the corresponding forwarding rules. +short_description: Creates a GCP TargetInstance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + description: + description: + - An optional description of this resource. 
+    - 'This field represents a link to an Instance resource in GCP. It can be specified
+- name: create an instance
+''' + +RETURN = ''' +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +instance: + description: + - A URL to the virtual machine instance that handles traffic for this target instance. + Accepts self-links or the partial paths with format `projects/project/zones/zone/instances/instance' + or `zones/zone/instances/instance` . + returned: success + type: dict +natPolicy: + description: + - NAT option controlling how IPs are NAT'ed to the instance. + - Currently only NO_NAT (default value) is supported. + returned: success + type: str +zone: + description: + - URL of the zone where the target instance resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + instance=dict(required=True, type='dict'), + nat_policy=dict(default='NO_NAT', type='str'), + zone=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#targetInstance' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), kind) + create(module, collection(module), kind) + + +def delete(module, link, kind): + auth = 
GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#targetInstance', + u'zone': module.params.get('zone'), + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'instance': replace_resource_dict(module.params.get(u'instance', {}), 'selfLink'), + u'natPolicy': module.params.get('nat_policy'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/targetInstances/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/targetInstances".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'name': response.get(u'name'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'instance': replace_resource_dict(module.params.get(u'instance', {}), 'selfLink'), + u'natPolicy': module.params.get('nat_policy'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetInstance') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_instance_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_instance_info.py new file mode 100644 index 000000000..106f0ce43 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_instance_info.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_instance_info +description: +- Gather info for GCP TargetInstance +short_description: Gather info for GCP TargetInstance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). 
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+''' + +EXAMPLES = ''' +- name: get info on a target instance + gcp_compute_target_instance_info: + zone: us-central1-a + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + instance: + description: + - A URL to the virtual machine instance that handles traffic for this target + instance. Accepts self-links or the partial paths with format `projects/project/zones/zone/instances/instance' + or `zones/zone/instances/instance` . + returned: success + type: dict + natPolicy: + description: + - NAT option controlling how IPs are NAT'ed to the instance. + - Currently only NO_NAT (default value) is supported. + returned: success + type: str + zone: + description: + - URL of the zone where the target instance resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/targetInstances".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_pool.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_pool.py new file mode 100644 index 000000000..b5643afa5 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_pool.py @@ -0,0 +1,522 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_pool +description: +- Represents a TargetPool resource, used for Load Balancing. 
+short_description: Creates a GCP TargetPool +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + backup_pool: + description: + - This field is applicable only when the containing target pool is serving a forwarding + rule as the primary pool, and its failoverRatio field is properly set to a value + between [0, 1]. + - 'backupPool and failoverRatio together define the fallback behavior of the primary + target pool: if the ratio of the healthy instances in the primary pool is at + or below failoverRatio, traffic arriving at the load-balanced IP will be directed + to the backup pool.' + - In case where failoverRatio and backupPool are not set, or all the instances + in the backup pool are unhealthy, the traffic will be directed back to the primary + pool in the "force" mode, where traffic will be spread to the healthy instances + with the best effort, or to all instances when no instance is healthy. + - 'This field represents a link to a TargetPool resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_target_pool task and then set this backup_pool field to "{{ + name-of-resource }}"' + required: false + type: dict + description: + description: + - An optional description of this resource. + required: false + type: str + failover_ratio: + description: + - This field is applicable only when the containing target pool is serving a forwarding + rule as the primary pool (i.e., not as a backup pool to some other target pool). + The value of the field must be in [0, 1]. + - 'If set, backupPool must also be set. 
They together define the fallback behavior + of the primary target pool: if the ratio of the healthy instances in the primary + pool is at or below this number, traffic arriving at the load-balanced IP will + be directed to the backup pool.' + - In case where failoverRatio is not set or all the instances in the backup pool + are unhealthy, the traffic will be directed back to the primary pool in the + "force" mode, where traffic will be spread to the healthy instances with the + best effort, or to all instances when no instance is healthy. + required: false + type: str + health_check: + description: + - A reference to a HttpHealthCheck resource. + - A member instance in this pool is considered healthy if and only if the health + checks pass. If not specified it means all member instances will be considered + healthy at all times. + - 'This field represents a link to a HttpHealthCheck resource in GCP. It can be + specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_http_health_check task and then set this + health_check field to "{{ name-of-resource }}"' + required: false + type: dict + instances: + description: + - A list of virtual machine instances serving this pool. + - They must live in zones contained in the same region as this pool. + elements: dict + required: false + type: list + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. 
+ required: true + type: str + session_affinity: + description: + - 'Session affinity option. Must be one of these values: * NONE: Connections from + the same client IP may go to any instance in the pool.' + - "* CLIENT_IP: Connections from the same client IP will go to the same instance + in the pool while that instance remains healthy." + - "* CLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol + will go to the same instance in the pool while that instance remains healthy." + - 'Some valid choices include: "NONE", "CLIENT_IP", "CLIENT_IP_PROTO"' + required: false + type: str + region: + description: + - The region where the target pool resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/targetPools)' +- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/network/target-pools)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a target pool + google.cloud.gcp_compute_target_pool: + name: test_object + region: us-west1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +backupPool: + description: + - This field is applicable only when the containing target pool is serving a forwarding + rule as the primary pool, and its failoverRatio field is properly set to a value + between [0, 1]. + - 'backupPool and failoverRatio together define the fallback behavior of the primary + target pool: if the ratio of the healthy instances in the primary pool is at or + below failoverRatio, traffic arriving at the load-balanced IP will be directed + to the backup pool.' + - In case where failoverRatio and backupPool are not set, or all the instances in + the backup pool are unhealthy, the traffic will be directed back to the primary + pool in the "force" mode, where traffic will be spread to the healthy instances + with the best effort, or to all instances when no instance is healthy. 
+ returned: success + type: dict +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +failoverRatio: + description: + - This field is applicable only when the containing target pool is serving a forwarding + rule as the primary pool (i.e., not as a backup pool to some other target pool). + The value of the field must be in [0, 1]. + - 'If set, backupPool must also be set. They together define the fallback behavior + of the primary target pool: if the ratio of the healthy instances in the primary + pool is at or below this number, traffic arriving at the load-balanced IP will + be directed to the backup pool.' + - In case where failoverRatio is not set or all the instances in the backup pool + are unhealthy, the traffic will be directed back to the primary pool in the "force" + mode, where traffic will be spread to the healthy instances with the best effort, + or to all instances when no instance is healthy. + returned: success + type: str +healthCheck: + description: + - A reference to a HttpHealthCheck resource. + - A member instance in this pool is considered healthy if and only if the health + checks pass. If not specified it means all member instances will be considered + healthy at all times. + returned: success + type: dict +id: + description: + - The unique identifier for the resource. + returned: success + type: int +instances: + description: + - A list of virtual machine instances serving this pool. + - They must live in zones contained in the same region as this pool. + returned: success + type: list +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +sessionAffinity: + description: + - 'Session affinity option. Must be one of these values: * NONE: Connections from + the same client IP may go to any instance in the pool.' + - "* CLIENT_IP: Connections from the same client IP will go to the same instance + in the pool while that instance remains healthy." + - "* CLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol + will go to the same instance in the pool while that instance remains healthy." + returned: success + type: str +region: + description: + - The region where the target pool resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + backup_pool=dict(type='dict'), + description=dict(type='str'), + failover_ratio=dict(type='str'), + health_check=dict(type='dict'), + instances=dict(type='list', elements='dict'), + name=dict(required=True, type='str'), + session_affinity=dict(type='str'), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + 
module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#targetPool' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#targetPool', + u'backupPool': replace_resource_dict(module.params.get(u'backup_pool', {}), 'selfLink'), + u'description': module.params.get('description'), + u'failoverRatio': module.params.get('failover_ratio'), + u'healthCheck': replace_resource_dict(module.params.get(u'health_check', {}), 'selfLink'), + u'instances': replace_resource_dict(module.params.get('instances', []), 'selfLink'), + u'name': module.params.get('name'), + u'sessionAffinity': module.params.get('session_affinity'), + } + request = encode_request(request, module) + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, 
auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetPools/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetPools".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + result = decode_response(result, module) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + request = decode_response(request, module) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'backupPool': replace_resource_dict(module.params.get(u'backup_pool', {}), 'selfLink'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': response.get(u'description'), + u'failoverRatio': response.get(u'failoverRatio'), + u'healthCheck': response.get(u'healthCheck'), + u'id': response.get(u'id'), + u'instances': response.get(u'instances'), + u'name': module.params.get('name'), + u'sessionAffinity': module.params.get('session_affinity'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + response = fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetPool') + if response: + return decode_response(response, module) + else: + return {} + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +# Mask the fact healthChecks array is actually a single object of type +# HttpHealthCheck. 
+#
+# Google Compute Engine API defines healthChecks as a list but it can only
+# take [0, 1] elements. To make it simpler to declare we'll map that to a
+# single object and encode/decode as appropriate.
+def encode_request(request, module):
+    if 'healthCheck' in request:
+        request['healthChecks'] = [request['healthCheck']]
+        del request['healthCheck']
+    return request
+
+
+# Mask healthChecks into a single element.
+# @see encode_request for details
+def decode_response(response, module):
+    if response['kind'] != 'compute#targetPool':
+        return response
+
+    # Map healthChecks[0] => healthCheck, but only when the list is non-empty.
+    if 'healthChecks' in response:
+        if response['healthChecks']:
+            response['healthCheck'] = response['healthChecks'][0]
+        del response['healthChecks']
+
+    return response
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_pool_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_pool_info.py
new file mode 100644
index 000000000..f61965156
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_pool_info.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_pool_info +description: +- Gather info for GCP TargetPool +short_description: Gather info for GCP TargetPool +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - The region where the target pool resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a target pool + gcp_compute_target_pool_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + backupPool: + description: + - This field is applicable only when the containing target pool is serving a + forwarding rule as the primary pool, and its failoverRatio field is properly + set to a value between [0, 1]. + - 'backupPool and failoverRatio together define the fallback behavior of the + primary target pool: if the ratio of the healthy instances in the primary + pool is at or below failoverRatio, traffic arriving at the load-balanced IP + will be directed to the backup pool.' 
+ - In case where failoverRatio and backupPool are not set, or all the instances + in the backup pool are unhealthy, the traffic will be directed back to the + primary pool in the "force" mode, where traffic will be spread to the healthy + instances with the best effort, or to all instances when no instance is healthy. + returned: success + type: dict + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + failoverRatio: + description: + - This field is applicable only when the containing target pool is serving a + forwarding rule as the primary pool (i.e., not as a backup pool to some other + target pool). The value of the field must be in [0, 1]. + - 'If set, backupPool must also be set. They together define the fallback behavior + of the primary target pool: if the ratio of the healthy instances in the primary + pool is at or below this number, traffic arriving at the load-balanced IP + will be directed to the backup pool.' + - In case where failoverRatio is not set or all the instances in the backup + pool are unhealthy, the traffic will be directed back to the primary pool + in the "force" mode, where traffic will be spread to the healthy instances + with the best effort, or to all instances when no instance is healthy. + returned: success + type: str + healthCheck: + description: + - A reference to a HttpHealthCheck resource. + - A member instance in this pool is considered healthy if and only if the health + checks pass. If not specified it means all member instances will be considered + healthy at all times. + returned: success + type: dict + id: + description: + - The unique identifier for the resource. + returned: success + type: int + instances: + description: + - A list of virtual machine instances serving this pool. + - They must live in zones contained in the same region as this pool. 
+ returned: success + type: list + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + sessionAffinity: + description: + - 'Session affinity option. Must be one of these values: * NONE: Connections + from the same client IP may go to any instance in the pool.' + - "* CLIENT_IP: Connections from the same client IP will go to the same instance + in the pool while that instance remains healthy." + - "* CLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol + will go to the same instance in the pool while that instance remains healthy." + returned: success + type: str + region: + description: + - The region where the target pool resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetPools".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_ssl_proxy.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_ssl_proxy.py new file mode 100644 index 000000000..33bf10747 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_ssl_proxy.py @@ -0,0 +1,531 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_ssl_proxy +description: +- Represents a TargetSslProxy resource, which is used by one or more global forwarding + rule to route incoming SSL requests to a backend service. +short_description: Creates a GCP TargetSslProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the backend. 
+ - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + type: str + service: + description: + - A reference to the BackendService resource. + - 'This field represents a link to a BackendService resource in GCP. It can be + specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_backend_service task and then set this service + field to "{{ name-of-resource }}"' + required: true + type: dict + ssl_certificates: + description: + - A list of SslCertificate resources that are used to authenticate connections + between users and the load balancer. At least one SSL certificate must be specified. + elements: dict + required: true + type: list + ssl_policy: + description: + - A reference to the SslPolicy resource that will be associated with the TargetSslProxy + resource. If not set, the TargetSslProxy resource will not have any SSL policy + configured. + - 'This field represents a link to a SslPolicy resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_ssl_policy task and then set this ssl_policy field to "{{ name-of-resource + }}"' + required: false + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/targetSslProxies)' +- 'Setting Up SSL proxy for Google Cloud Load Balancing: U(https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: instancegroup-targetsslproxy + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancegroup + +- name: create a health check + google.cloud.gcp_compute_health_check: + name: healthcheck-targetsslproxy + type: TCP + tcp_health_check: + port_name: service-health + request: ping + response: pong + healthy_threshold: 10 + timeout_sec: 2 + unhealthy_threshold: 5 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a backend service + google.cloud.gcp_compute_backend_service: + name: backendservice-targetsslproxy + backends: + - group: "{{ instancegroup.selfLink }}" + health_checks: + - "{{ healthcheck.selfLink }}" + protocol: SSL + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: backendservice + +- name: create a SSL certificate + google.cloud.gcp_compute_ssl_certificate: + name: sslcert-targetsslproxy + description: A certificate for testing. 
Do not use this certificate in production + certificate: |- + -----BEGIN CERTIFICATE----- + MIICqjCCAk+gAwIBAgIJAIuJ+0352Kq4MAoGCCqGSM49BAMCMIGwMQswCQYDVQQG + EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjERMA8GA1UEBwwIS2lya2xhbmQxFTAT + BgNVBAoMDEdvb2dsZSwgSW5jLjEeMBwGA1UECwwVR29vZ2xlIENsb3VkIFBsYXRm + b3JtMR8wHQYDVQQDDBZ3d3cubXktc2VjdXJlLXNpdGUuY29tMSEwHwYJKoZIhvcN + AQkBFhJuZWxzb25hQGdvb2dsZS5jb20wHhcNMTcwNjI4MDQ1NjI2WhcNMjcwNjI2 + MDQ1NjI2WjCBsDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xETAP + BgNVBAcMCEtpcmtsYW5kMRUwEwYDVQQKDAxHb29nbGUsIEluYy4xHjAcBgNVBAsM + FUdvb2dsZSBDbG91ZCBQbGF0Zm9ybTEfMB0GA1UEAwwWd3d3Lm15LXNlY3VyZS1z + aXRlLmNvbTEhMB8GCSqGSIb3DQEJARYSbmVsc29uYUBnb29nbGUuY29tMFkwEwYH + KoZIzj0CAQYIKoZIzj0DAQcDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ + 4mzkzTv0dXyB750fOGN02HtkpBOZzzvUARTR10JQoSe2/5PIwaNQME4wHQYDVR0O + BBYEFKIQC3A2SDpxcdfn0YLKineDNq/BMB8GA1UdIwQYMBaAFKIQC3A2SDpxcdfn + 0YLKineDNq/BMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSQAwRgIhALs4vy+O + M3jcqgA4fSW/oKw6UJxp+M6a+nGMX+UJR3YgAiEAvvl39QRVAiv84hdoCuyON0lJ + zqGNhIPGq2ULqXKK8BY= + -----END CERTIFICATE----- + private_key: |- + -----BEGIN EC PRIVATE KEY----- + MHcCAQEEIObtRo8tkUqoMjeHhsOh2ouPpXCgBcP+EDxZCB/tws15oAoGCCqGSM49 + AwEHoUQDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ4mzkzTv0dXyB750f + OGN02HtkpBOZzzvUARTR10JQoSe2/5PIwQ== + -----END EC PRIVATE KEY----- + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: sslcert + +- name: create a target SSL proxy + google.cloud.gcp_compute_target_ssl_proxy: + name: test_object + ssl_certificates: + - "{{ sslcert }}" + service: "{{ backendservice }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. 
+ returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str +service: + description: + - A reference to the BackendService resource. + returned: success + type: dict +sslCertificates: + description: + - A list of SslCertificate resources that are used to authenticate connections between + users and the load balancer. At least one SSL certificate must be specified. + returned: success + type: list +sslPolicy: + description: + - A reference to the SslPolicy resource that will be associated with the TargetSslProxy + resource. If not set, the TargetSslProxy resource will not have any SSL policy + configured. 
  returned: success
  type: dict
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time

################################################################################
# Main
################################################################################


def main():
    """Main function"""

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            description=dict(type='str'),
            name=dict(required=True, type='str'),
            proxy_header=dict(type='str'),
            service=dict(required=True, type='dict'),
            ssl_certificates=dict(required=True, type='list', elements='dict'),
            ssl_policy=dict(type='dict'),
        )
    )

    # Default to the Compute scope when the playbook supplies none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#targetSslProxy'

    # Reconcile declarative state: fetch what exists, then create/update/delete
    # so the remote resource matches the requested `state`.
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind, fetch)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link, kind):
    """POST the desired resource to the collection URL and block on the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


def update(module, link, kind, fetch):
    """Apply per-field updates against the live resource, then return a fresh fetch."""
    update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
    return fetch_resource(module, self_link(module), kind)


def update_fields(module, request, response):
    """Dispatch each changed field to its dedicated set* endpoint.

    Each mutable TargetSslProxy field is updated through its own API call
    (setProxyHeader, setBackendService, setSslCertificates, setSslPolicy),
    so only the fields that actually differ are posted.
    """
    if response.get('proxyHeader') != request.get('proxyHeader'):
        proxy_header_update(module, request, response)
    if response.get('service') != request.get('service'):
        service_update(module, request, response)
    if response.get('sslCertificates') != request.get('sslCertificates'):
        ssl_certificates_update(module, request, response)
    if response.get('sslPolicy') != request.get('sslPolicy'):
        ssl_policy_update(module, request, response)


def proxy_header_update(module, request, response):
    """POST the desired proxy_header to the setProxyHeader endpoint."""
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/targetSslProxies/{name}/setProxyHeader"]).format(**module.params),
        {u'proxyHeader': module.params.get('proxy_header')},
    )


def service_update(module, request, response):
    """POST the backend service selfLink to the setBackendService endpoint."""
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/targetSslProxies/{name}/setBackendService"]).format(**module.params),
        {u'service': replace_resource_dict(module.params.get(u'service', {}), 'selfLink')},
    )


def ssl_certificates_update(module, request, response):
    """POST the certificate selfLinks to the setSslCertificates endpoint."""
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/targetSslProxies/{name}/setSslCertificates"]).format(**module.params),
        {u'sslCertificates': replace_resource_dict(module.params.get('ssl_certificates', []), 'selfLink')},
    )


def ssl_policy_update(module, request, response):
    """POST the SSL policy selfLink to the setSslPolicy endpoint."""
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/targetSslProxies/{name}/setSslPolicy"]).format(**module.params),
        {u'sslPolicy': replace_resource_dict(module.params.get(u'ssl_policy', {}), 'selfLink')},
    )


def delete(module, link, kind):
    """DELETE the resource at its self link and block on the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))


def resource_to_request(module):
    """Build the API request body from module params.

    replace_resource_dict extracts the 'selfLink' from registered-resource
    dictionaries (helper defined in gcp_utils — presumably also accepts
    plain-string references; confirm in module_utils).
    """
    request = {
        u'kind': 'compute#targetSslProxy',
        u'description': module.params.get('description'),
        u'name': module.params.get('name'),
        u'proxyHeader': module.params.get('proxy_header'),
        u'service': replace_resource_dict(module.params.get(u'service', {}), 'selfLink'),
        u'sslCertificates': replace_resource_dict(module.params.get('ssl_certificates', []), 'selfLink'),
        u'sslPolicy': replace_resource_dict(module.params.get(u'ssl_policy', {}), 'selfLink'),
    }
    # Drop unset/empty values, but keep an explicit boolean False.
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource; returns None on 404 when allow_not_found is set."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    """URL of this specific TargetSslProxy resource."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetSslProxies/{name}".format(**module.params)


def collection(module):
    """URL of the project-global TargetSslProxy collection."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetSslProxies".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response, failing the module on HTTP or API-level errors."""
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    # json.decoder.JSONDecodeError does not exist on Python 2; fall back to ValueError.
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Compare request vs. live resource on their shared keys only."""
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    # GcpRequest implements the comparison semantics (defined in gcp_utils).
    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Normalize the API response to the shape used by is_different().

    User-settable fields come from module params; output-only fields come
    from the response.
    """
    return {
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'description': module.params.get('description'),
        u'id': response.get(u'id'),
        u'name': module.params.get('name'),
        u'proxyHeader': response.get(u'proxyHeader'),
        u'service': response.get(u'service'),
        u'sslCertificates': response.get(u'sslCertificates'),
        u'sslPolicy': response.get(u'sslPolicy'),
    }


def async_op_url(module, extra_data=None):
    """URL of the global Operations resource for a given op_id."""
    if extra_data is None:
        extra_data = {}
    url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    """Wait for an async compute operation to finish, then fetch its targetLink."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetSslProxy')


def wait_for_completion(status, op_result, module):
    """Poll the operation once a second until its status is DONE."""
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        # Fail fast if the operation has already reported errors.
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module if the operation response carries errors at err_path."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)
+ +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_ssl_proxy_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_ssl_proxy_info.py new file mode 100644 index 000000000..b6f17d8a8 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_ssl_proxy_info.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_ssl_proxy_info +description: +- Gather info for GCP TargetSslProxy +short_description: Gather info for GCP TargetSslProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). 
+ - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a target SSL proxy + gcp_compute_target_ssl_proxy_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + service: + description: + - A reference to the BackendService resource. + returned: success + type: dict + sslCertificates: + description: + - A list of SslCertificate resources that are used to authenticate connections + between users and the load balancer. At least one SSL certificate must be + specified. + returned: success + type: list + sslPolicy: + description: + - A reference to the SslPolicy resource that will be associated with the TargetSslProxy + resource. If not set, the TargetSslProxy resource will not have any SSL policy + configured. 
+ returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetSslProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_tcp_proxy.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_tcp_proxy.py new file mode 100644 index 000000000..29793bb89 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_tcp_proxy.py @@ -0,0 +1,453 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_tcp_proxy +description: +- Represents a TargetTcpProxy resource, which is used by one or more global forwarding + rule to route incoming TCP requests to a Backend service. +short_description: Creates a GCP TargetTcpProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + proxy_header: + description: + - Specifies the type of proxy header to append before sending data to the backend. 
+ - 'Some valid choices include: "NONE", "PROXY_V1"' + required: false + type: str + service: + description: + - A reference to the BackendService resource. + - 'This field represents a link to a BackendService resource in GCP. It can be + specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_backend_service task and then set this service + field to "{{ name-of-resource }}"' + required: true + type: dict + proxy_bind: + description: + - This field only applies when the forwarding rule that references this target + proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. + required: false + type: bool + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/targetTcpProxies)' +- 'Setting Up TCP proxy for Google Cloud Load Balancing: U(https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: instancegroup-targettcpproxy + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancegroup + +- name: create a health check + google.cloud.gcp_compute_health_check: + name: healthcheck-targettcpproxy + type: TCP + tcp_health_check: + port_name: service-health + request: ping + response: pong + healthy_threshold: 10 + timeout_sec: 2 + unhealthy_threshold: 5 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a backend service + google.cloud.gcp_compute_backend_service: + name: backendservice-targettcpproxy + backends: + - group: "{{ instancegroup.selfLink }}" + health_checks: + - "{{ healthcheck.selfLink }}" + protocol: TCP + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ 
gcp_cred_file }}" + state: present + register: backendservice + +- name: create a target TCP proxy + google.cloud.gcp_compute_target_tcp_proxy: + name: test_object + proxy_header: PROXY_V1 + service: "{{ backendservice }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str +service: + description: + - A reference to the BackendService resource. + returned: success + type: dict +proxyBind: + description: + - This field only applies when the forwarding rule that references this target proxy + has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
  returned: success
  type: bool
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time

################################################################################
# Main
################################################################################


def main():
    """Main function"""

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            description=dict(type='str'),
            name=dict(required=True, type='str'),
            proxy_header=dict(type='str'),
            service=dict(required=True, type='dict'),
            proxy_bind=dict(type='bool'),
        )
    )

    # Default to the Compute scope when the playbook supplies none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#targetTcpProxy'

    # Reconcile declarative state: fetch what exists, then create/update/delete
    # so the remote resource matches the requested `state`.
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind, fetch)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link, kind):
    """POST the desired resource to the collection URL and block on the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


def update(module, link, kind, fetch):
    """Apply per-field updates against the live resource, then return a fresh fetch."""
    update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
    return fetch_resource(module, self_link(module), kind)


def update_fields(module, request, response):
    """Dispatch each changed field to its dedicated set* endpoint.

    Each mutable TargetTcpProxy field is updated through its own API call
    (setProxyHeader, setBackendService), so only the fields that actually
    differ are posted.
    """
    if response.get('proxyHeader') != request.get('proxyHeader'):
        proxy_header_update(module, request, response)
    if response.get('service') != request.get('service'):
        service_update(module, request, response)


def proxy_header_update(module, request, response):
    """POST the desired proxy_header to the setProxyHeader endpoint."""
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/targetTcpProxies/{name}/setProxyHeader"]).format(**module.params),
        {u'proxyHeader': module.params.get('proxy_header')},
    )


def service_update(module, request, response):
    """POST the backend service selfLink to the setBackendService endpoint."""
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://compute.googleapis.com/compute/v1/", "projects/{project}/global/targetTcpProxies/{name}/setBackendService"]).format(**module.params),
        {u'service': replace_resource_dict(module.params.get(u'service', {}), 'selfLink')},
    )


def delete(module, link, kind):
    """DELETE the resource at its self link and block on the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))


def resource_to_request(module):
    """Build the API request body from module params.

    replace_resource_dict extracts the 'selfLink' from registered-resource
    dictionaries (helper defined in gcp_utils).
    """
    request = {
        u'kind': 'compute#targetTcpProxy',
        u'description': module.params.get('description'),
        u'name': module.params.get('name'),
        u'proxyHeader': module.params.get('proxy_header'),
        u'service': replace_resource_dict(module.params.get(u'service', {}), 'selfLink'),
        u'proxyBind': module.params.get('proxy_bind'),
    }
    # Drop unset/empty values, but keep an explicit boolean False
    # (proxyBind=False is a meaningful setting).
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource; returns None on 404 when allow_not_found is set."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    """URL of this specific TargetTcpProxy resource."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetTcpProxies/{name}".format(**module.params)


def collection(module):
    """URL of the project-global TargetTcpProxy collection."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetTcpProxies".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response, failing the module on HTTP or API-level errors."""
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    # json.decoder.JSONDecodeError does not exist on Python 2; fall back to ValueError.
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Compare request vs. live resource on their shared keys only."""
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    # GcpRequest implements the comparison semantics (defined in gcp_utils).
    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': module.params.get('description'), + u'id': response.get(u'id'), + u'name': module.params.get('name'), + u'proxyHeader': response.get(u'proxyHeader'), + u'service': response.get(u'service'), + u'proxyBind': response.get(u'proxyBind'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetTcpProxy') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_tcp_proxy_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_tcp_proxy_info.py new file mode 100644 index 000000000..785f1aed7 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_tcp_proxy_info.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_tcp_proxy_info +description: +- Gather info for GCP TargetTcpProxy +short_description: Gather info for GCP TargetTcpProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a target TCP proxy + gcp_compute_target_tcp_proxy_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend. + returned: success + type: str + service: + description: + - A reference to the BackendService resource. + returned: success + type: dict + proxyBind: + description: + - This field only applies when the forwarding rule that references this target + proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
+ returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/targetTcpProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_vpn_gateway.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_vpn_gateway.py new file mode 100644 index 000000000..0c5e73309 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_vpn_gateway.py @@ -0,0 +1,402 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_vpn_gateway +description: +- Represents a VPN gateway running in GCP. This virtual device is managed by Google, + but used only by you. +short_description: Creates a GCP TargetVpnGateway +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + network: + description: + - The network this VPN gateway is accepting traffic for. + - 'This field represents a link to a Network resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_network task and then set this network field to "{{ name-of-resource + }}"' + required: true + type: dict + region: + description: + - The region this gateway should sit in. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. 
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a address + google.cloud.gcp_compute_address: + name: address-vpngateway + region: us-west1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: address + +- name: create a network + google.cloud.gcp_compute_network: + name: network-vpngateway + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a target vpn gateway + google.cloud.gcp_compute_target_vpn_gateway: + name: test_object + region: us-west1 + network: "{{ network }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +network: + description: + - The network this VPN gateway is accepting traffic for. 
+ returned: success + type: dict +tunnels: + description: + - A list of references to VpnTunnel resources associated with this VPN gateway. + returned: success + type: list +forwardingRules: + description: + - A list of references to the ForwardingRule resources associated with this VPN + gateway. + returned: success + type: list +region: + description: + - The region this gateway should sit in. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + description=dict(type='str'), + name=dict(required=True, type='str'), + network=dict(required=True, type='dict'), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#targetVpnGateway' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + 
module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), kind) + create(module, collection(module), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#targetVpnGateway', + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'description': module.params.get('description'), + u'name': module.params.get('name'), + u'id': response.get(u'id'), + u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'), + u'tunnels': response.get(u'tunnels'), + u'forwardingRules': response.get(u'forwardingRules'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 
'compute#targetVpnGateway') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_vpn_gateway_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_vpn_gateway_info.py new file mode 100644 index 000000000..24644af42 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_target_vpn_gateway_info.py @@ -0,0 +1,240 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_vpn_gateway_info +description: +- Gather info for GCP TargetVpnGateway +short_description: Gather info for GCP TargetVpnGateway +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - The region this gateway should sit in. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a target vpn gateway + gcp_compute_target_vpn_gateway_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + network: + description: + - The network this VPN gateway is accepting traffic for. + returned: success + type: dict + tunnels: + description: + - A list of references to VpnTunnel resources associated with this VPN gateway. + returned: success + type: list + forwardingRules: + description: + - A list of references to the ForwardingRule resources associated with this + VPN gateway. + returned: success + type: list + region: + description: + - The region this gateway should sit in. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return 
"https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_url_map.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_url_map.py new file mode 100644 index 000000000..ed35cfc07 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_url_map.py @@ -0,0 +1,7871 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual 
+# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_url_map +description: +- UrlMaps are used to route requests to a backend service based on rules that you + define for the host and path of an incoming URL. +short_description: Creates a GCP UrlMap +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + default_service: + description: + - The full or partial URL of the defaultService resource to which traffic is directed + if none of the hostRules match. If defaultRouteAction is additionally specified, + advanced routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if defaultService is specified, defaultRouteAction + cannot contain any weightedBackendServices. Conversely, if routeAction specifies + any weightedBackendServices, service must not be specified. Only one of defaultService, + defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set. + - 'This field represents a link to a BackendService resource in GCP. It can be + specified in two ways. 
First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_backend_service task and then set this default_service + field to "{{ name-of-resource }}"' + required: false + type: dict + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + required: false + type: str + header_action: + description: + - Specifies changes to request and response headers that need to take effect for + the selected backendService. The headerAction specified here take effect after + headerAction specified under pathMatcher. + required: false + type: dict + suboptions: + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding the request to + the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist for + the header. If true, headerValue is set for the header, discarding any + values that were set for that header. + required: true + type: bool + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response back to the client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. 
+ required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist for + the header. If true, headerValue is set for the header, discarding any + values that were set for that header. + required: true + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + elements: str + required: false + type: list + host_rules: + description: + - The list of HostRules to use against the URL. + elements: dict + required: false + type: list + suboptions: + description: + description: + - An optional description of this resource. Provide this property when you + create the resource. + required: false + type: str + hosts: + description: + - The list of host patterns to match. They must be valid hostnames, except + * will match any string of ([a-z0-9-.]*). In that case, * must be the first + character and must be followed in the pattern by either - or . + elements: str + required: true + type: list + path_matcher: + description: + - The name of the PathMatcher to use to match the path portion of the URL + if the hostRule matches the URL's host portion. + required: true + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + type: str + path_matchers: + description: + - The list of named PathMatchers to use against the URL. 
+ elements: dict + required: false + type: list + suboptions: + default_service: + description: + - 'The full or partial URL to the BackendService resource. This will be used + if none of the pathRules or routeRules defined by this PathMatcher are matched. + For example, the following are all valid URLs to a BackendService resource: + - U(https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService) + - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService + If defaultRouteAction is additionally specified, advanced routing actions + like URL Rewrites, etc. take effect prior to sending the request to the + backend. However, if defaultService is specified, defaultRouteAction cannot + contain any weightedBackendServices. Conversely, if defaultRouteAction specifies + any weightedBackendServices, defaultService must not be specified.' + - 'Only one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService + must be set. Authorization requires one or more of the following Google + IAM permissions on the specified resource defaultService: - compute.backendBuckets.use + - compute.backendServices.use .' + - 'This field represents a link to a BackendService resource in GCP. It can + be specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_backend_service task and then set this + default_service field to "{{ name-of-resource }}"' + required: false + type: dict + description: + description: + - An optional description of this resource. Provide this property when you + create the resource. + required: false + type: str + header_action: + description: + - Specifies changes to request and response headers that need to take effect + for the selected backendService. 
HeaderAction specified here are applied + after the matching HttpRouteRule HeaderAction and before the HeaderAction + in the UrlMap . + required: false + type: dict + suboptions: + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + required: true + type: bool + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed from the + request prior to forwarding the request to the backendService. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response back to the + client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + required: true + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed from the + response prior to sending the response back to the client. + elements: str + required: false + type: list + name: + description: + - The name to which this PathMatcher is referred by the HostRule. 
+ required: true + type: str + path_rules: + description: + - 'The list of path rules. Use this list instead of routeRules when routing + based on simple path matching is all that''s required. The order by which + path rules are specified does not matter. Matches are always done on the + longest-path-first basis. For example: a pathRule with a path /a/b/c/* will + match before /a/b/* irrespective of the order in which those paths appear + in this list. Within a given pathMatcher, only one of pathRules or routeRules + must be set.' + elements: dict + required: false + type: list + suboptions: + service: + description: + - The backend service resource to which traffic is directed if this rule + is matched. If routeAction is additionally specified, advanced routing + actions like URL Rewrites, etc. take effect prior to sending the request + to the backend. However, if service is specified, routeAction cannot + contain any weightedBackendService s. Conversely, if routeAction specifies + any weightedBackendServices, service must not be specified. Only one + of urlRedirect, service or routeAction.weightedBackendService must be + set. + - 'This field represents a link to a BackendService resource in GCP. It + can be specified in two ways. First, you can place a dictionary with + key ''selfLink'' and value of your resource''s selfLink Alternatively, + you can add `register: name-of-resource` to a gcp_compute_backend_service + task and then set this service field to "{{ name-of-resource }}"' + required: false + type: dict + paths: + description: + - 'The list of path patterns to match. Each must start with / and the + only place a \\* is allowed is at the end following a /. The string fed + to the path matcher does not include any text after the first ? or #, + and those chars are not allowed here.' 
+ elements: str + required: true + type: list + route_action: + description: + - In response to a matching path, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction specifies + any weightedBackendServices, service must not be set. Conversely if + service is set, routeAction cannot contain any weightedBackendServices. + Only one of routeAction or urlRedirect must be set. + required: false + type: dict + suboptions: + cors_policy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing + . + required: false + type: dict + suboptions: + allow_credentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This translates + to the Access- Control-Allow-Credentials header. Defaults to + false. + required: false + default: 'false' + type: bool + allow_headers: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + elements: str + required: false + type: list + allow_methods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + elements: str + required: false + type: list + allow_origin_regexes: + description: + - Specifies the regular expression patterns that match allowed + origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + elements: str + required: false + type: list + allow_origins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. 
+ elements: str + required: false + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + required: true + type: bool + expose_headers: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. + elements: str + required: false + type: list + max_age: + description: + - Specifies how long the results of a preflight request can be + cached. This translates to the content for the Access-Control-Max-Age + header. + required: false + type: int + fault_injection_policy: + description: + - The specification for fault injection introduced into traffic to + test the resiliency of clients to backend service failure. As part + of fault injection, when clients send requests to a backend service, + delays can be introduced by Loadbalancer on a percentage of requests + before sending those request to the backend service. Similarly requests + from clients can be aborted by the Loadbalancer for a percentage + of requests. timeout and retry_policy will be ignored by clients + that are configured with a fault_injection_policy. + required: false + type: dict + suboptions: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + required: false + type: dict + suboptions: + http_status: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + required: true + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The value + must be between 0.0 and 100.0 inclusive. + required: true + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + required: false + type: dict + suboptions: + fixed_delay: + description: + - Specifies the value of the fixed delay interval. 
+ required: true + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + required: true + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + required: true + type: str + request_mirror_policy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. Loadbalancer + does not wait for responses from the shadow service. Prior to sending + traffic to the shadow service, the host / authority header is suffixed + with -shadow. + required: false + type: dict + suboptions: + backend_service: + description: + - The BackendService resource being mirrored to. + - 'This field represents a link to a BackendService resource in + GCP. It can be specified in two ways. First, you can place a + dictionary with key ''selfLink'' and value of your resource''s + selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_backend_service task and then set this backend_service + field to "{{ name-of-resource }}"' + required: true + type: dict + retry_policy: + description: + - Specifies the retry policy associated with this route. + required: false + type: dict + suboptions: + num_retries: + description: + - Specifies the allowed number retries. This number must be > + 0. + required: false + type: int + per_try_timeout: + description: + - Specifies a non-zero timeout per retry attempt. 
+ required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. Must + be from 0 to 999,999,999 inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 + to 315,576,000,000 inclusive. + required: true + type: str + retry_conditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: * 5xx: Loadbalancer will attempt a retry if + the backend service responds with any 5xx response code, or + if the backend service does not respond at all, example: disconnects, + reset, read timeout, connection failure, and refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx + response codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend service + resets the stream with a REFUSED_STREAM error code. This reset + type indicates that it is safe to retry." + - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header is + set to resource-exhausted * unavailable: Loadbalancer will retry + if the gRPC status code in the response header is set to unavailable + ." + elements: str + required: false + type: list + timeout: + description: + - Specifies the timeout for the selected route. 
Timeout is computed + from the time the request is has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + required: true + type: str + url_rewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the + request to the matched service . + required: false + type: dict + suboptions: + host_rewrite: + description: + - Prior to forwarding the request to the selected service, the + request's host header is replaced with contents of hostRewrite. + The value must be between 1 and 255 characters. + required: false + type: str + path_prefix_rewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + The value must be between 1 and 1024 characters. + required: false + type: str + weighted_backend_services: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. The weights determine the fraction of traffic that + flows to their corresponding backend service. If all traffic needs + to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. 
Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + elements: dict + required: false + type: list + suboptions: + backend_service: + description: + - The default BackendService resource. Before forwarding the request + to backendService, the loadbalancer applies any relevant headerActions + specified as part of this backendServiceWeight. + - 'This field represents a link to a BackendService resource in + GCP. It can be specified in two ways. First, you can place a + dictionary with key ''selfLink'' and value of your resource''s + selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_backend_service task and then set this backend_service + field to "{{ name-of-resource }}"' + required: true + type: dict + header_action: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + required: false + type: dict + suboptions: + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue is + set for the header, discarding any values that were + set for that header. 
+ required: true + type: bool + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the + backendService. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response + back to the client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue is + set for the header, discarding any values that were + set for that header. + required: true + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to + the client. + elements: str + required: false + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . The selection of a backend service is determined only for + new traffic. Once a user's request has been directed to a backendService, + subsequent requests will be sent to the same backendService + as determined by the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + required: true + type: int + url_redirect: + description: + - When a path pattern is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction + must not be set. + required: false + type: dict + suboptions: + host_redirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. 
The value must be between + 1 and 255 characters. + required: false + type: str + https_redirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. + - If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps + used in TargetHttpProxys. Setting this true for TargetHttpsProxy + is not permitted. The default is set to false. + required: false + default: 'false' + type: bool + path_redirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither + is supplied, the path of the original request will be used for the + redirect. + - The value must be between 1 and 1024 characters. + required: false + type: str + prefix_redirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. prefixRedirect cannot be supplied together with pathRedirect. + Supply one alone or neither. If neither is supplied, the path of + the original request will be used for the redirect. The value must + be between 1 and 1024 characters. + required: false + type: str + redirect_response_code: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported + values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value + and corresponds to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." 
+ - 'Some valid choices include: "FOUND", "MOVED_PERMANENTLY_DEFAULT", + "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"' + required: false + type: str + strip_query: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. + required: false + default: 'false' + type: bool + route_rules: + description: + - 'The list of ordered HTTP route rules. Use this list instead of pathRules + when advanced route matching and routing actions are desired. The order + of specifying routeRules matters: the first rule that matches will cause + its specified routing action to take effect. Within a given pathMatcher, + only one of pathRules or routeRules must be set. routeRules are not supported + in UrlMaps intended for External load balancers.' + elements: dict + required: false + type: list + suboptions: + priority: + description: + - For routeRules within a given pathMatcher, priority determines the order + in which load balancer will interpret routeRules. RouteRules are evaluated + in order of priority, from the lowest to highest number. The priority + of a rule decreases as its number increases (1, 2, 3, N+1). The first + rule that matches the request is applied. + - You cannot configure two or more routeRules with the same priority. + - Priority for each rule must be set to a number between 0 and 2147483647 + inclusive. + - Priority numbers can have gaps, which enable you to add or remove rules + in the future without affecting the rest of the rules. For example, + 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which + you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in + the future without any impact on existing rules. + required: true + type: int + service: + description: + - The backend service resource to which traffic is directed if this rule + is matched. 
If routeAction is additionally specified, advanced routing + actions like URL Rewrites, etc. take effect prior to sending the request + to the backend. However, if service is specified, routeAction cannot + contain any weightedBackendService s. Conversely, if routeAction specifies + any weightedBackendServices, service must not be specified. Only one + of urlRedirect, service or routeAction.weightedBackendService must be + set. + - 'This field represents a link to a BackendService resource in GCP. It + can be specified in two ways. First, you can place a dictionary with + key ''selfLink'' and value of your resource''s selfLink Alternatively, + you can add `register: name-of-resource` to a gcp_compute_backend_service + task and then set this service field to "{{ name-of-resource }}"' + required: false + type: dict + header_action: + description: + - Specifies changes to request and response headers that need to take + effect for the selected backendService. The headerAction specified here + are applied before the matching pathMatchers[].headerAction and after + pathMatchers[].routeRules[].r outeAction.weightedBackendService.backendServiceWeightAction[].headerAction + . + required: false + type: dict + suboptions: + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the header, + discarding any values that were set for that header. 
+ required: true + type: bool + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed from + the request prior to forwarding the request to the backendService. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response back to + the client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the header, + discarding any values that were set for that header. + required: true + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed from + the response prior to sending the response back to the client. + elements: str + required: false + type: list + match_rules: + description: + - The rules for determining a match. + elements: dict + required: false + type: list + suboptions: + full_path_match: + description: + - For satisfying the matchRule condition, the path of the request + must exactly match the value specified in fullPathMatch after removing + any query parameters and anchor that may be part of the original + URL. FullPathMatch must be between 1 and 1024 characters. Only one + of prefixMatch, fullPathMatch or regexMatch must be specified. + required: false + type: str + header_matches: + description: + - Specifies a list of header match criteria, all of which must match + corresponding headers in the request. + elements: dict + required: false + type: list + suboptions: + exact_match: + description: + - The value should exactly match contents of exactMatch. 
Only + one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch + or rangeMatch must be set. + required: false + type: str + header_name: + description: + - The name of the HTTP header to match. For matching against the + HTTP request's authority, use a headerMatch with the header + name ":authority". For matching a request's method, use the + headerName ":method". + required: true + type: str + invert_match: + description: + - If set to false, the headerMatch is considered a match if the + match criteria above are met. If set to true, the headerMatch + is considered a match if the match criteria above are NOT met. + Defaults to false. + required: false + default: 'false' + type: bool + prefix_match: + description: + - The value of the header must start with the contents of prefixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + required: false + type: str + present_match: + description: + - A header with the contents of headerName must exist. The match + takes place whether or not the request's header has a value + or not. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + required: false + type: bool + range_match: + description: + - The header value must be an integer and its value must be in + the range specified in rangeMatch. If the header does not contain + an integer, number or is empty, the match fails. For example + for a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 + will not match. - -3someString will not match. Only one of exactMatch, + prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch + must be set. + required: false + type: dict + suboptions: + range_end: + description: + - The end of the range (exclusive). + required: true + type: int + range_start: + description: + - The start of the range (inclusive). 
+ required: true + type: int + regex_match: + description: + - 'The value of the header must match the regular expression specified + in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript + For matching against a port specified in the HTTP request, use + a headerMatch with headerName set to PORT and a regular expression + that satisfies the RFC2616 Host header''s port specifier.' + - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + required: false + type: str + suffix_match: + description: + - The value of the header must end with the contents of suffixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + required: false + type: str + ignore_case: + description: + - Specifies that prefixMatch and fullPathMatch matches are case sensitive. + - Defaults to false. + required: false + default: 'false' + type: bool + metadata_filters: + description: + - Opaque filter criteria used by Loadbalancer to restrict routing + configuration to a limited set of xDS compliant clients. In their xDS + requests to Loadbalancer, xDS clients present node metadata. If + a match takes place, the relevant routing configuration is made + available to those proxies. For each metadataFilter in this list, + if its filterMatchCriteria is set to MATCH_ANY, at least one of + the filterLabels must match the corresponding label provided in + the metadata. If its filterMatchCriteria is set to MATCH_ALL, then + all of its filterLabels must match with corresponding labels in + the provided metadata. metadataFilters specified here can override + those specified in ForwardingRule that refers to this UrlMap. metadataFilters + only applies to Loadbalancers that have their loadBalancingScheme + set to INTERNAL_SELF_MANAGED. 
+ elements: dict + required: false + type: list + suboptions: + filter_labels: + description: + - The list of label value pairs that must match labels in the + provided metadata based on filterMatchCriteria This list must + not be empty and can have at the most 64 entries. + elements: dict + required: true + type: list + suboptions: + name: + description: + - Name of metadata label. The name can have a maximum length + of 1024 characters and must be at least 1 character long. + required: true + type: str + value: + description: + - The value of the label must match the specified value. value + can have a maximum length of 1024 characters. + required: true + type: str + filter_match_criteria: + description: + - 'Specifies how individual filterLabel matches within the list + of filterLabels contribute towards the overall metadataFilter + match. Supported values are: - MATCH_ANY: At least one of the + filterLabels must have a matching label in the provided metadata.' + - "- MATCH_ALL: All filterLabels must have matching labels in + the provided metadata." + - 'Some valid choices include: "MATCH_ALL", "MATCH_ANY"' + required: true + type: str + prefix_match: + description: + - For satisfying the matchRule condition, the request's path must + begin with the specified prefixMatch. prefixMatch must begin with + a /. The value must be between 1 and 1024 characters. Only one of + prefixMatch, fullPathMatch or regexMatch must be specified. + required: false + type: str + query_parameter_matches: + description: + - Specifies a list of query parameter match criteria, all of which + must match corresponding query parameters in the request. + elements: dict + required: false + type: list + suboptions: + exact_match: + description: + - The queryParameterMatch matches if the value of the parameter + exactly matches the contents of exactMatch. Only one of presentMatch, + exactMatch and regexMatch must be set. 
+ required: false + type: str + name: + description: + - The name of the query parameter to match. The query parameter + must exist in the request, in the absence of which the request + match fails. + required: true + type: str + present_match: + description: + - Specifies that the queryParameterMatch matches if the request + contains the query parameter, irrespective of whether the parameter + has a value or not. Only one of presentMatch, exactMatch and + regexMatch must be set. + required: false + type: bool + regex_match: + description: + - The queryParameterMatch matches if the value of the parameter + matches the regular expression specified by regexMatch. For + the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of presentMatch, exactMatch and regexMatch must be + set. + required: false + type: str + regex_match: + description: + - For satisfying the matchRule condition, the path of the request + must satisfy the regular expression specified in regexMatch after + removing any query parameters and anchor supplied with the original + URL. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. + required: false + type: str + route_action: + description: + - In response to a matching matchRule, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction specifies + any weightedBackendServices, service must not be set. Conversely if + service is set, routeAction cannot contain any weightedBackendServices. + Only one of routeAction or urlRedirect must be set. + required: false + type: dict + suboptions: + cors_policy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing + . 
+ required: false + type: dict + suboptions: + allow_credentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This translates + to the Access- Control-Allow-Credentials header. Defaults to + false. + required: false + default: 'false' + type: bool + allow_headers: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + elements: str + required: false + type: list + allow_methods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + elements: str + required: false + type: list + allow_origin_regexes: + description: + - Specifies the regular expression patterns that match allowed + origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + elements: str + required: false + type: list + allow_origins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + elements: str + required: false + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + - which indicates that the CORS policy is in effect. Defaults + to false. + required: false + default: 'false' + type: bool + expose_headers: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. + elements: str + required: false + type: list + max_age: + description: + - Specifies how long the results of a preflight request can be + cached. This translates to the content for the Access-Control-Max-Age + header. + required: false + type: int + fault_injection_policy: + description: + - The specification for fault injection introduced into traffic to + test the resiliency of clients to backend service failure. 
As part + of fault injection, when clients send requests to a backend service, + delays can be introduced by Loadbalancer on a percentage of requests + before sending those request to the backend service. Similarly requests + from clients can be aborted by the Loadbalancer for a percentage + of requests. timeout and retry_policy will be ignored by clients + that are configured with a fault_injection_policy. + required: false + type: dict + suboptions: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + required: false + type: dict + suboptions: + http_status: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + required: false + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The value + must be between 0.0 and 100.0 inclusive. + required: false + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + required: false + type: dict + suboptions: + fixed_delay: + description: + - Specifies the value of the fixed delay interval. + required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + required: true + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. 
+ required: false + type: str + request_mirror_policy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. Loadbalancer + does not wait for responses from the shadow service. Prior to sending + traffic to the shadow service, the host / authority header is suffixed + with -shadow. + required: false + type: dict + suboptions: + backend_service: + description: + - The BackendService resource being mirrored to. + - 'This field represents a link to a BackendService resource in + GCP. It can be specified in two ways. First, you can place a + dictionary with key ''selfLink'' and value of your resource''s + selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_backend_service task and then set this backend_service + field to "{{ name-of-resource }}"' + required: true + type: dict + retry_policy: + description: + - Specifies the retry policy associated with this route. + required: false + type: dict + suboptions: + num_retries: + description: + - Specifies the allowed number retries. This number must be > + 0. + required: true + type: int + per_try_timeout: + description: + - Specifies a non-zero timeout per retry attempt. + - If not specified, will use the timeout set in HttpRouteAction. + If timeout in HttpRouteAction is not set, will use the largest + timeout among all backend services associated with the route. + required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. Must + be from 0 to 999,999,999 inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 + to 315,576,000,000 inclusive. 
+ required: true + type: str + retry_conditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: * 5xx: Loadbalancer will attempt a retry if + the backend service responds with any 5xx response code, or + if the backend service does not respond at all, example: disconnects, + reset, read timeout, connection failure, and refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx + response codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend service + resets the stream with a REFUSED_STREAM error code. This reset + type indicates that it is safe to retry." + - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header is + set to resource-exhausted * unavailable: Loadbalancer will retry + if the gRPC status code in the response header is set to unavailable + ." + elements: str + required: false + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + required: false + type: dict + suboptions: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. 
+ Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + required: true + type: str + url_rewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the + request to the matched service . + required: false + type: dict + suboptions: + host_rewrite: + description: + - Prior to forwarding the request to the selected service, the + request's host header is replaced with contents of hostRewrite. + The value must be between 1 and 255 characters. + required: false + type: str + path_prefix_rewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + The value must be between 1 and 1024 characters. + required: false + type: str + weighted_backend_services: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. The weights determine the fraction of traffic that + flows to their corresponding backend service. If all traffic needs + to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + elements: dict + required: false + type: list + suboptions: + backend_service: + description: + - The default BackendService resource. Before forwarding the request + to backendService, the loadbalancer applies any relevant headerActions + specified as part of this backendServiceWeight. + - 'This field represents a link to a BackendService resource in + GCP. 
It can be specified in two ways. First, you can place a + dictionary with key ''selfLink'' and value of your resource''s + selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_backend_service task and then set this backend_service + field to "{{ name-of-resource }}"' + required: true + type: dict + header_action: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + required: false + type: dict + suboptions: + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue is + set for the header, discarding any values that were + set for that header. + required: true + type: bool + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the + backendService. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response + back to the client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header. + required: true + type: str + header_value: + description: + - The value of the header to add. + required: true + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. 
If true, headerValue is + set for the header, discarding any values that were + set for that header. + required: true + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to + the client. + elements: str + required: false + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . The selection of a backend service is determined only for + new traffic. Once a user's request has been directed to a backendService, + subsequent requests will be sent to the same backendService + as determined by the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + required: true + type: int + url_redirect: + description: + - When this rule is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction + must not be set. + required: false + type: dict + suboptions: + host_redirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. The value must be between + 1 and 255 characters. + required: false + type: str + https_redirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. If set to false, the URL scheme of the redirected request + will remain the same as that of the request. This must only be set + for UrlMaps used in TargetHttpProxys. + - Setting this true for TargetHttpsProxy is not permitted. Defaults + to false. + required: false + default: 'false' + type: bool + path_redirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. Only one of pathRedirect or + prefixRedirect must be specified. The value must be between 1 and + 1024 characters. 
+ required: false + type: str + prefix_redirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. + required: false + type: str + redirect_response_code: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported + values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value + and corresponds to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." + - 'Some valid choices include: "FOUND", "MOVED_PERMANENTLY_DEFAULT", + "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"' + required: false + type: str + strip_query: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. Defaults to false. + required: false + default: 'false' + type: bool + default_url_redirect: + description: + - When none of the specified hostRules match, the request is redirected to + a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, + defaultService or defaultRouteAction must not be set. + required: false + type: dict + suboptions: + host_redirect: + description: + - The host that will be used in the redirect response instead of the one + that was supplied in the request. The value must be between 1 and 255 + characters. + required: false + type: str + https_redirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain + the same as that of the request. 
This must only be set for UrlMaps used + in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. + The default is set to false. + required: false + default: 'false' + type: bool + path_redirect: + description: + - The path that will be used in the redirect response instead of the one + that was supplied in the request. pathRedirect cannot be supplied together + with prefixRedirect. Supply one alone or neither. If neither is supplied, + the path of the original request will be used for the redirect. The + value must be between 1 and 1024 characters. + required: false + type: str + prefix_redirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply + one alone or neither. If neither is supplied, the path of the original + request will be used for the redirect. The value must be between 1 and + 1024 characters. + required: false + type: str + redirect_response_code: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + - 'Some valid choices include: "FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", + "SEE_OTHER", "TEMPORARY_REDIRECT"' + required: false + type: str + strip_query: + description: + - If set to true, any accompanying query portion of the original URL is + removed prior to redirecting the request. If set to false, the query + portion of the original URL is retained. 
+ required: false + default: 'false' + type: bool + default_route_action: + description: + - defaultRouteAction takes effect when none of the pathRules or routeRules + match. The load balancer performs advanced routing actions like URL rewrites, + header transformations, etc. prior to forwarding the request to the selected + backend. If defaultRouteAction specifies any weightedBackendServices, defaultService + must not be set. + - Conversely if defaultService is set, defaultRouteAction cannot contain any + weightedBackendServices. + - Only one of defaultRouteAction or defaultUrlRedirect must be set. + required: false + type: dict + suboptions: + weighted_backend_services: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. + - The weights determine the fraction of traffic that flows to their corresponding + backend service. + - If all traffic needs to go to a single backend service, there must be + one weightedBackendService with weight set to a non 0 number. + - Once a backendService is identified and before forwarding the request + to the backend service, advanced routing actions like Url rewrites and + header transformations are applied depending on additional settings + specified in this HttpRouteAction. + elements: dict + required: false + type: list + suboptions: + backend_service: + description: + - The full or partial URL to the default BackendService resource. + Before forwarding the request to backendService, the loadbalancer + applies any relevant headerActions specified as part of this backendServiceWeight. + - 'This field represents a link to a BackendService resource in GCP. + It can be specified in two ways. 
First, you can place a dictionary + with key ''selfLink'' and value of your resource''s selfLink Alternatively, + you can add `register: name-of-resource` to a gcp_compute_backend_service + task and then set this backend_service field to "{{ name-of-resource + }}"' + required: false + type: dict + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . + - The selection of a backend service is determined only for new traffic. + Once a user's request has been directed to a backendService, subsequent + requests will be sent to the same backendService as determined by + the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + required: false + type: int + header_action: + description: + - Specifies changes to request and response headers that need to take + effect for the selected backendService. + - headerAction specified here take effect before headerAction in the + enclosing HttpRouteRule, PathMatcher and UrlMap. + required: false + type: dict + suboptions: + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed from + the request prior to forwarding the request to the backendService. + elements: str + required: false + type: list + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding the + request to the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header to add. + required: false + type: str + header_value: + description: + - The value of the header to add. + required: false + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding any + values that were set for that header. 
+ required: false + default: 'false' + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed from + the response prior to sending the response back to the client. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response back + to the client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header to add. + required: false + type: str + header_value: + description: + - The value of the header to add. + required: false + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding any + values that were set for that header. + required: false + default: 'false' + type: bool + url_rewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the request + to the matched service. + required: false + type: dict + suboptions: + path_prefix_rewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + - The value must be between 1 and 1024 characters. + required: false + type: str + host_rewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. + - The value must be between 1 and 255 characters. + required: false + type: str + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed from + the time the request has been fully processed (i.e. end-of-stream) up + until the response has been completely processed. Timeout includes all + retries. + - If not specified, will use the largest timeout among all backend services + associated with the route. 
+ required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr * + 24 hr/day * 365.25 days/year * 10000 years .' + required: false + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + retry_policy: + description: + - Specifies the retry policy associated with this route. + required: false + type: dict + suboptions: + retry_conditions: + description: + - 'Specifies one or more conditions when this retry rule applies. Valid + values are: * 5xx: Loadbalancer will attempt a retry if the backend + service responds with any 5xx response code, or if the backend service + does not respond at all, example: disconnects, reset, read timeout, + * connection failure, and refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response codes + 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx response + codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend service + resets the stream with a REFUSED_STREAM error code." + - This reset type indicates that it is safe to retry. 
+ - "* cancelled: Loadbalancer will retry if the gRPC status code in + the response header is set to cancelled * deadline-exceeded: Loadbalancer + will retry if the gRPC status code in the response header is set + to deadline-exceeded * resource-exhausted: Loadbalancer will retry + if the gRPC status code in the response header is set to resource-exhausted + * unavailable: Loadbalancer will retry if the gRPC status code in + the response header is set to unavailable ." + elements: str + required: false + type: list + num_retries: + description: + - Specifies the allowed number retries. This number must be > 0. If + not specified, defaults to 1. + required: false + default: '1' + type: int + per_try_timeout: + description: + - Specifies a non-zero timeout per retry attempt. + - If not specified, will use the timeout set in HttpRouteAction. If + timeout in HttpRouteAction is not set, will use the largest timeout + among all backend services associated with the route. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + required: false + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + request_mirror_policy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. + - Loadbalancer does not wait for responses from the shadow service. Prior + to sending traffic to the shadow service, the host / authority header + is suffixed with -shadow. 
+ required: false + type: dict + suboptions: + backend_service: + description: + - The full or partial URL to the BackendService resource being mirrored + to. + - 'This field represents a link to a BackendService resource in GCP. + It can be specified in two ways. First, you can place a dictionary + with key ''selfLink'' and value of your resource''s selfLink Alternatively, + you can add `register: name-of-resource` to a gcp_compute_backend_service + task and then set this backend_service field to "{{ name-of-resource + }}"' + required: true + type: dict + cors_policy: + description: + - The specification for allowing client side cross-origin requests. Please + see [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) + . + required: false + type: dict + suboptions: + allow_origins: + description: + - Specifies the list of origins that will be allowed to do CORS requests. + - An origin is allowed if it matches either an item in allowOrigins + or an item in allowOriginRegexes. + elements: str + required: false + type: list + allow_origin_regexes: + description: + - Specifies the regular expression patterns that match allowed origins. + For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either an item in allowOrigins + or an item in allowOriginRegexes. + elements: str + required: false + type: list + allow_methods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + elements: str + required: false + type: list + allow_headers: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + elements: str + required: false + type: list + expose_headers: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + elements: str + required: false + type: list + max_age: + description: + - Specifies how long results of a preflight request can be cached + in seconds. 
+ - This translates to the Access-Control-Max-Age header. + required: false + type: int + allow_credentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. + - This translates to the Access-Control-Allow-Credentials header. + required: false + default: 'false' + type: bool + disabled: + description: + - If true, specifies the CORS policy is disabled. The default value + is false, which indicates that the CORS policy is in effect. + required: false + default: 'false' + type: bool + fault_injection_policy: + description: + - The specification for fault injection introduced into traffic to test + the resiliency of clients to backend service failure. + - As part of fault injection, when clients send requests to a backend + service, delays can be introduced by Loadbalancer on a percentage of + requests before sending those request to the backend service. Similarly + requests from clients can be aborted by the Loadbalancer for a percentage + of requests. + - timeout and retryPolicy will be ignored by clients that are configured + with a faultInjectionPolicy. + required: false + type: dict + suboptions: + delay: + description: + - The specification for how client requests are delayed as part of + fault injection, before being sent to a backend service. + required: false + type: dict + suboptions: + fixed_delay: + description: + - Specifies the value of the fixed delay interval. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 + to 315,576,000,000 inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + required: false + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. 
Must + be from 0 to 999,999,999 inclusive. + required: false + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + required: false + type: str + abort: + description: + - The specification for how client requests are aborted as part of + fault injection. + required: false + type: dict + suboptions: + http_status: + description: + - The HTTP status code used to abort the request. + - The value must be between 200 and 599 inclusive. + required: false + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + required: false + type: str + tests: + description: + - The list of expected URL mapping tests. Request to update this UrlMap will succeed + only if all of the test cases pass. You can specify a maximum of 100 tests per + UrlMap. + elements: dict + required: false + type: list + suboptions: + description: + description: + - Description of this test case. + required: false + type: str + host: + description: + - Host portion of the URL. + required: true + type: str + path: + description: + - Path portion of the URL. + required: true + type: str + service: + description: + - Expected BackendService resource the given URL should be mapped to. + - 'This field represents a link to a BackendService resource in GCP. It can + be specified in two ways. 
First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_backend_service task and then set this + service field to "{{ name-of-resource }}"' + required: true + type: dict + default_url_redirect: + description: + - When none of the specified hostRules match, the request is redirected to a URL + specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService + or defaultRouteAction must not be set. + required: false + type: dict + suboptions: + host_redirect: + description: + - The host that will be used in the redirect response instead of the one that + was supplied in the request. The value must be between 1 and 255 characters. + required: false + type: str + https_redirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain the + same as that of the request. This must only be set for UrlMaps used in TargetHttpProxys. + Setting this true for TargetHttpsProxy is not permitted. The default is + set to false. + required: false + default: 'false' + type: bool + path_redirect: + description: + - The path that will be used in the redirect response instead of the one that + was supplied in the request. pathRedirect cannot be supplied together with + prefixRedirect. Supply one alone or neither. If neither is supplied, the + path of the original request will be used for the redirect. The value must + be between 1 and 1024 characters. + required: false + type: str + prefix_redirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply one + alone or neither. 
If neither is supplied, the path of the original request + will be used for the redirect. The value must be between 1 and 1024 characters. + required: false + type: str + redirect_response_code: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values are: + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + - 'Some valid choices include: "FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", + "SEE_OTHER", "TEMPORARY_REDIRECT"' + required: false + type: str + strip_query: + description: + - If set to true, any accompanying query portion of the original URL is removed + prior to redirecting the request. If set to false, the query portion of + the original URL is retained. The default is set to false. + required: false + default: 'false' + type: bool + default_route_action: + description: + - defaultRouteAction takes effect when none of the hostRules match. The load balancer + performs advanced routing actions like URL rewrites, header transformations, + etc. prior to forwarding the request to the selected backend. + - If defaultRouteAction specifies any weightedBackendServices, defaultService + must not be set. Conversely if defaultService is set, defaultRouteAction cannot + contain any weightedBackendServices. + - Only one of defaultRouteAction or defaultUrlRedirect must be set. + required: false + type: dict + suboptions: + weighted_backend_services: + description: + - A list of weighted backend services to send traffic to when a route match + occurs. + - The weights determine the fraction of traffic that flows to their corresponding + backend service. 
+ - If all traffic needs to go to a single backend service, there must be one + weightedBackendService with weight set to a non 0 number. + - Once a backendService is identified and before forwarding the request to + the backend service, advanced routing actions like Url rewrites and header + transformations are applied depending on additional settings specified in + this HttpRouteAction. + elements: dict + required: false + type: list + suboptions: + backend_service: + description: + - The full or partial URL to the default BackendService resource. Before + forwarding the request to backendService, the loadbalancer applies any + relevant headerActions specified as part of this backendServiceWeight. + - 'This field represents a link to a BackendService resource in GCP. It + can be specified in two ways. First, you can place a dictionary with + key ''selfLink'' and value of your resource''s selfLink Alternatively, + you can add `register: name-of-resource` to a gcp_compute_backend_service + task and then set this backend_service field to "{{ name-of-resource + }}"' + required: false + type: dict + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed as + weight / (sum of all weightedBackendService weights in routeAction) + . + - The selection of a backend service is determined only for new traffic. + Once a user's request has been directed to a backendService, subsequent + requests will be sent to the same backendService as determined by the + BackendService's session affinity policy. + - The value must be between 0 and 1000 . + required: false + type: int + header_action: + description: + - Specifies changes to request and response headers that need to take + effect for the selected backendService. + - headerAction specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. 
+ required: false + type: dict + suboptions: + request_headers_to_remove: + description: + - A list of header names for headers that need to be removed from + the request prior to forwarding the request to the backendService. + elements: str + required: false + type: list + request_headers_to_add: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header to add. + required: false + type: str + header_value: + description: + - The value of the header to add. + required: false + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding any values + that were set for that header. + required: false + default: 'false' + type: bool + response_headers_to_remove: + description: + - A list of header names for headers that need to be removed from + the response prior to sending the response back to the client. + elements: str + required: false + type: list + response_headers_to_add: + description: + - Headers to add the response prior to sending the response back to + the client. + elements: dict + required: false + type: list + suboptions: + header_name: + description: + - The name of the header to add. + required: false + type: str + header_value: + description: + - The value of the header to add. + required: false + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding any values + that were set for that header. + required: false + default: 'false' + type: bool + url_rewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the request + to the matched service. 
+ required: false + type: dict + suboptions: + path_prefix_rewrite: + description: + - Prior to forwarding the request to the selected backend service, the + matching portion of the request's path is replaced by pathPrefixRewrite. + - The value must be between 1 and 1024 characters. + required: false + type: str + host_rewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. + - The value must be between 1 and 255 characters. + required: false + type: str + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed from the + time the request has been fully processed (i.e. end-of-stream) up until + the response has been completely processed. Timeout includes all retries. + - If not specified, will use the largest timeout among all backend services + associated with the route. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years .' + required: false + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds field + and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + required: false + type: int + retry_policy: + description: + - Specifies the retry policy associated with this route. + required: false + type: dict + suboptions: + retry_conditions: + description: + - 'Specifies one or more conditions when this retry rule applies. 
Valid + values are: * 5xx: Loadbalancer will attempt a retry if the backend + service responds with any 5xx response code, or if the backend service + does not respond at all, example: disconnects, reset, read timeout, + * connection failure, and refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response codes + 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting to + backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx response + codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend service resets + the stream with a REFUSED_STREAM error code." + - This reset type indicates that it is safe to retry. + - "* cancelled: Loadbalancer will retry if the gRPC status code in the + response header is set to cancelled * deadline-exceeded: Loadbalancer + will retry if the gRPC status code in the response header is set to + deadline-exceeded * resource-exhausted: Loadbalancer will retry if the + gRPC status code in the response header is set to resource-exhausted + * unavailable: Loadbalancer will retry if the gRPC status code in the + response header is set to unavailable ." + elements: str + required: false + type: list + num_retries: + description: + - Specifies the allowed number of retries. This number must be > 0. If not + specified, defaults to 1. + required: false + default: '1' + type: int + per_try_timeout: + description: + - Specifies a non-zero timeout per retry attempt. + - If not specified, will use the timeout set in HttpRouteAction. If timeout + in HttpRouteAction is not set, will use the largest timeout among all + backend services associated with the route. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. 
+ - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr * + 24 hr/day * 365.25 days/year * 10000 years .' + required: false + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + request_mirror_policy: + description: + - Specifies the policy on how requests intended for the route's backends are + shadowed to a separate mirrored backend service. + - Loadbalancer does not wait for responses from the shadow service. Prior + to sending traffic to the shadow service, the host / authority header is + suffixed with -shadow. + required: false + type: dict + suboptions: + backend_service: + description: + - The full or partial URL to the BackendService resource being mirrored + to. + - 'This field represents a link to a BackendService resource in GCP. It + can be specified in two ways. First, you can place a dictionary with + key ''selfLink'' and value of your resource''s selfLink Alternatively, + you can add `register: name-of-resource` to a gcp_compute_backend_service + task and then set this backend_service field to "{{ name-of-resource + }}"' + required: true + type: dict + cors_policy: + description: + - The specification for allowing client side cross-origin requests. Please + see [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) + . + required: false + type: dict + suboptions: + allow_origins: + description: + - Specifies the list of origins that will be allowed to do CORS requests. + - An origin is allowed if it matches either an item in allowOrigins or + an item in allowOriginRegexes. + elements: str + required: false + type: list + allow_origin_regexes: + description: + - Specifies the regular expression patterns that match allowed origins. 
+ For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either an item in allowOrigins or + an item in allowOriginRegexes. + elements: str + required: false + type: list + allow_methods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + elements: str + required: false + type: list + allow_headers: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + elements: str + required: false + type: list + expose_headers: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + elements: str + required: false + type: list + max_age: + description: + - Specifies how long results of a preflight request can be cached in seconds. + - This translates to the Access-Control-Max-Age header. + required: false + type: int + allow_credentials: + description: + - In response to a preflight request, setting this to true indicates that + the actual request can include user credentials. + - This translates to the Access-Control-Allow-Credentials header. + required: false + default: 'false' + type: bool + disabled: + description: + - If true, specifies the CORS policy is disabled. The default value is + false, which indicates that the CORS policy is in effect. + required: false + default: 'false' + type: bool + fault_injection_policy: + description: + - The specification for fault injection introduced into traffic to test the + resiliency of clients to backend service failure. + - As part of fault injection, when clients send requests to a backend service, + delays can be introduced by Loadbalancer on a percentage of requests before + sending those request to the backend service. Similarly requests from clients + can be aborted by the Loadbalancer for a percentage of requests. + - timeout and retryPolicy will be ignored by clients that are configured with + a faultInjectionPolicy. 
+ required: false + type: dict + suboptions: + delay: + description: + - The specification for how client requests are delayed as part of fault + injection, before being sent to a backend service. + required: false + type: dict + suboptions: + fixed_delay: + description: + - Specifies the value of the fixed delay interval. + required: false + type: dict + suboptions: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + required: false + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + required: false + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) on which + delay will be introduced as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + required: false + type: str + abort: + description: + - The specification for how client requests are aborted as part of fault + injection. + required: false + type: dict + suboptions: + http_status: + description: + - The HTTP status code used to abort the request. + - The value must be between 200 and 599 inclusive. + required: false + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) which + will be aborted as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/urlMaps)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a instance group + google.cloud.gcp_compute_instance_group: + name: instancegroup-urlmap + zone: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instancegroup + +- name: create a HTTP health check + google.cloud.gcp_compute_http_health_check: + name: httphealthcheck-urlmap + healthy_threshold: 10 + port: 8080 + timeout_sec: 2 + unhealthy_threshold: 5 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: healthcheck + +- name: create a backend service + google.cloud.gcp_compute_backend_service: + name: backendservice-urlmap + backends: + - group: "{{ instancegroup.selfLink }}" + health_checks: + - "{{ healthcheck.selfLink }}" + enable_cdn: 'true' + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: backendservice + +- name: create a URL map + google.cloud.gcp_compute_url_map: + name: test_object + default_service: "{{ backendservice }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +defaultService: + description: + - The full or partial URL of the defaultService resource to which traffic is directed + if none of the hostRules match. If defaultRouteAction is additionally specified, + advanced routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if defaultService is specified, defaultRouteAction + cannot contain any weightedBackendServices. Conversely, if routeAction specifies + any weightedBackendServices, service must not be specified. 
Only one of defaultService, + defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set. + returned: success + type: dict +description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str +id: + description: + - The unique identifier for the resource. + returned: success + type: int +fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. This + field is used in optimistic locking. + returned: success + type: str +headerAction: + description: + - Specifies changes to request and response headers that need to take effect for + the selected backendService. The headerAction specified here take effect after + headerAction specified under pathMatcher. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request to the + backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist for + the header. If true, headerValue is set for the header, discarding any + values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. 
+ returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist for + the header. If true, headerValue is set for the header, discarding any + values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + returned: success + type: list +hostRules: + description: + - The list of HostRules to use against the URL. + returned: success + type: complex + contains: + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + hosts: + description: + - The list of host patterns to match. They must be valid hostnames, except * + will match any string of ([a-z0-9-.]*). In that case, * must be the first + character and must be followed in the pattern by either - or . + returned: success + type: list + pathMatcher: + description: + - The name of the PathMatcher to use to match the path portion of the URL if + the hostRule matches the URL's host portion. + returned: success + type: str +name: + description: + - Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +pathMatchers: + description: + - The list of named PathMatchers to use against the URL. 
+ returned: success + type: complex + contains: + defaultService: + description: + - 'The full or partial URL to the BackendService resource. This will be used + if none of the pathRules or routeRules defined by this PathMatcher are matched. + For example, the following are all valid URLs to a BackendService resource: + - U(https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService) + - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService + If defaultRouteAction is additionally specified, advanced routing actions + like URL Rewrites, etc. take effect prior to sending the request to the backend. + However, if defaultService is specified, defaultRouteAction cannot contain + any weightedBackendServices. Conversely, if defaultRouteAction specifies any + weightedBackendServices, defaultService must not be specified.' + - 'Only one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService + must be set. Authorization requires one or more of the following Google IAM + permissions on the specified resource defaultService: - compute.backendBuckets.use + - compute.backendServices.use .' + returned: success + type: dict + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + headerAction: + description: + - Specifies changes to request and response headers that need to take effect + for the selected backendService. HeaderAction specified here are applied after + the matching HttpRouteRule HeaderAction and before the HeaderAction in the + UrlMap . + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request to + the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. 
+ returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back to the + client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + returned: success + type: list + name: + description: + - The name to which this PathMatcher is referred by the HostRule. + returned: success + type: str + pathRules: + description: + - 'The list of path rules. Use this list instead of routeRules when routing + based on simple path matching is all that''s required. The order by which + path rules are specified does not matter. Matches are always done on the longest-path-first + basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* + irrespective of the order in which those paths appear in this list. 
Within + a given pathMatcher, only one of pathRules or routeRules must be set.' + returned: success + type: complex + contains: + service: + description: + - The backend service resource to which traffic is directed if this rule + is matched. If routeAction is additionally specified, advanced routing + actions like URL Rewrites, etc. take effect prior to sending the request + to the backend. However, if service is specified, routeAction cannot contain + any weightedBackendService s. Conversely, if routeAction specifies any + weightedBackendServices, service must not be specified. Only one of urlRedirect, + service or routeAction.weightedBackendService must be set. + returned: success + type: dict + paths: + description: + - 'The list of path patterns to match. Each must start with / and the only + place a \\* is allowed is at the end following a /. The string fed to the + path matcher does not include any text after the first ? or #, and those + chars are not allowed here.' + returned: success + type: list + routeAction: + description: + - In response to a matching path, the load balancer performs advanced routing + actions like URL rewrites, header transformations, etc. prior to forwarding + the request to the selected backend. If routeAction specifies any weightedBackendServices, + service must not be set. Conversely if service is set, routeAction cannot + contain any weightedBackendServices. Only one of routeAction or urlRedirect + must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing . + returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This translates + to the Access- Control-Allow-Credentials header. Defaults to false. 
+ returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed origins. + For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can be cached. + This translates to the content for the Access-Control-Max-Age + header. + returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic to test + the resiliency of clients to backend service failure. As part of fault + injection, when clients send requests to a backend service, delays + can be introduced by Loadbalancer on a percentage of requests before + sending those request to the backend service. Similarly requests from + clients can be aborted by the Loadbalancer for a percentage of requests. + timeout and retry_policy will be ignored by clients that are configured + with a fault_injection_policy. 
+ returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The value + must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. Loadbalancer + does not wait for responses from the shadow service. Prior to sending + traffic to the shadow service, the host / authority header is suffixed + with -shadow. 
+ returned: success + type: complex + contains: + backendService: + description: + - The BackendService resource being mirrored to. + returned: success + type: dict + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + numRetries: + description: + - Specifies the allowed number retries. This number must be > 0. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + returned: success + type: str + retryConditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: * 5xx: Loadbalancer will attempt a retry if + the backend service responds with any 5xx response code, or if + the backend service does not respond at all, example: disconnects, + reset, read timeout, connection failure, and refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx response + codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend service + resets the stream with a REFUSED_STREAM error code. This reset + type indicates that it is safe to retry." 
+ - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header is set + to resource-exhausted * unavailable: Loadbalancer will retry if + the gRPC status code in the response header is set to unavailable + ." + returned: success + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request is has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: str + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the + request to the matched service . + returned: success + type: complex + contains: + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. The value + must be between 1 and 255 characters. + returned: success + type: str + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + The value must be between 1 and 1024 characters. 
+ returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. The weights determine the fraction of traffic that flows + to their corresponding backend service. If all traffic needs to go + to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are applied + depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default BackendService resource. Before forwarding the request + to backendService, the loadbalancer applies any relevant headerActions + specified as part of this backendServiceWeight. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need to + take effect for the selected backendService. headerAction specified + here take effect before headerAction in the enclosing HttpRouteRule, + PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the + request to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for + the header, discarding any values that were set for that + header. 
+ returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for + the header, discarding any values that were set for that + header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to the + client. + returned: success + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . The selection of a backend service is determined only for new + traffic. Once a user's request has been directed to a backendService, + subsequent requests will be sent to the same backendService as + determined by the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + urlRedirect: + description: + - When a path pattern is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction must + not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. 
The value must be between 1 + and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to + https. + - If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps + used in TargetHttpProxys. Setting this true for TargetHttpsProxy is + not permitted. The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither + is supplied, the path of the original request will be used for the + redirect. + - The value must be between 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. prefixRedirect cannot be supplied together with pathRedirect. + Supply one alone or neither. If neither is supplied, the path of the + original request will be used for the redirect. The value must be + between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." 
+ returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. + returned: success + type: bool + routeRules: + description: + - 'The list of ordered HTTP route rules. Use this list instead of pathRules + when advanced route matching and routing actions are desired. The order of + specifying routeRules matters: the first rule that matches will cause its + specified routing action to take effect. Within a given pathMatcher, only + one of pathRules or routeRules must be set. routeRules are not supported in + UrlMaps intended for External load balancers.' + returned: success + type: complex + contains: + priority: + description: + - For routeRules within a given pathMatcher, priority determines the order + in which load balancer will interpret routeRules. RouteRules are evaluated + in order of priority, from the lowest to highest number. The priority + of a rule decreases as its number increases (1, 2, 3, N+1). The first + rule that matches the request is applied. + - You cannot configure two or more routeRules with the same priority. + - Priority for each rule must be set to a number between 0 and 2147483647 + inclusive. + - Priority numbers can have gaps, which enable you to add or remove rules + in the future without affecting the rest of the rules. For example, 1, + 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you + could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future + without any impact on existing rules. + returned: success + type: int + service: + description: + - The backend service resource to which traffic is directed if this rule + is matched. If routeAction is additionally specified, advanced routing + actions like URL Rewrites, etc. take effect prior to sending the request + to the backend. 
However, if service is specified, routeAction cannot contain + any weightedBackendService s. Conversely, if routeAction specifies any + weightedBackendServices, service must not be specified. Only one of urlRedirect, + service or routeAction.weightedBackendService must be set. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need to take effect + for the selected backendService. The headerAction specified here are applied + before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].r + outeAction.weightedBackendService.backendServiceWeightAction[].headerAction + . + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back to + the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. 
+ returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + response prior to sending the response back to the client. + returned: success + type: list + matchRules: + description: + - The rules for determining a match. + returned: success + type: complex + contains: + fullPathMatch: + description: + - For satisfying the matchRule condition, the path of the request must + exactly match the value specified in fullPathMatch after removing + any query parameters and anchor that may be part of the original URL. + FullPathMatch must be between 1 and 1024 characters. Only one of prefixMatch, + fullPathMatch or regexMatch must be specified. + returned: success + type: str + headerMatches: + description: + - Specifies a list of header match criteria, all of which must match + corresponding headers in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The value should exactly match contents of exactMatch. Only one + of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch + or rangeMatch must be set. + returned: success + type: str + headerName: + description: + - The name of the HTTP header to match. For matching against the + HTTP request's authority, use a headerMatch with the header name + ":authority". For matching a request's method, use the headerName + ":method". + returned: success + type: str + invertMatch: + description: + - If set to false, the headerMatch is considered a match if the + match criteria above are met. If set to true, the headerMatch + is considered a match if the match criteria above are NOT met. + Defaults to false. 
+ returned: success + type: bool + prefixMatch: + description: + - The value of the header must start with the contents of prefixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + presentMatch: + description: + - A header with the contents of headerName must exist. The match + takes place whether or not the request's header has a value or + not. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: bool + rangeMatch: + description: + - The header value must be an integer and its value must be in the + range specified in rangeMatch. If the header does not contain + an integer, number or is empty, the match fails. For example for + a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 will + not match. - -3someString will not match. Only one of exactMatch, + prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch + must be set. + returned: success + type: complex + contains: + rangeEnd: + description: + - The end of the range (exclusive). + returned: success + type: int + rangeStart: + description: + - The start of the range (inclusive). + returned: success + type: int + regexMatch: + description: + - 'The value of the header must match the regular expression specified + in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript + For matching against a port specified in the HTTP request, use + a headerMatch with headerName set to PORT and a regular expression + that satisfies the RFC2616 Host header''s port specifier.' + - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + suffixMatch: + description: + - The value of the header must end with the contents of suffixMatch. 
+ Only one of exactMatch, prefixMatch, suffixMatch, regexMatch,
+ presentMatch or rangeMatch must be set.
+ returned: success
+ type: str
+ ignoreCase:
+ description:
+ - Specifies that prefixMatch and fullPathMatch matches are case sensitive.
+ - Defaults to false.
+ returned: success
+ type: bool
+ metadataFilters:
+ description:
+ - Opaque filter criteria used by Loadbalancer to restrict routing configuration
+ to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer,
+ xDS clients present node metadata. If a match takes place, the relevant
+ routing configuration is made available to those proxies. For each
+ metadataFilter in this list, if its filterMatchCriteria is set to
+ MATCH_ANY, at least one of the filterLabels must match the corresponding
+ label provided in the metadata. If its filterMatchCriteria is set
+ to MATCH_ALL, then all of its filterLabels must match with corresponding
+ labels in the provided metadata. metadataFilters specified here can
+ override those specified in ForwardingRule that refers to this
+ UrlMap. metadataFilters only applies to Loadbalancers that have their
+ loadBalancingScheme set to INTERNAL_SELF_MANAGED.
+ returned: success
+ type: complex
+ contains:
+ filterLabels:
+ description:
+ - The list of label value pairs that must match labels in the provided
+ metadata based on filterMatchCriteria This list must not be empty
+ and can have at the most 64 entries.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description:
+ - Name of metadata label. The name can have a maximum length
+ of 1024 characters and must be at least 1 character long.
+ returned: success
+ type: str
+ value:
+ description:
+ - The value of the label must match the specified value. value
+ can have a maximum length of 1024 characters.
+ returned: success + type: str + filterMatchCriteria: + description: + - 'Specifies how individual filterLabel matches within the list + of filterLabels contribute towards the overall metadataFilter + match. Supported values are: - MATCH_ANY: At least one of the + filterLabels must have a matching label in the provided metadata.' + - "- MATCH_ALL: All filterLabels must have matching labels in the + provided metadata." + returned: success + type: str + prefixMatch: + description: + - For satisfying the matchRule condition, the request's path must begin + with the specified prefixMatch. prefixMatch must begin with a /. The + value must be between 1 and 1024 characters. Only one of prefixMatch, + fullPathMatch or regexMatch must be specified. + returned: success + type: str + queryParameterMatches: + description: + - Specifies a list of query parameter match criteria, all of which must + match corresponding query parameters in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The queryParameterMatch matches if the value of the parameter + exactly matches the contents of exactMatch. Only one of presentMatch, + exactMatch and regexMatch must be set. + returned: success + type: str + name: + description: + - The name of the query parameter to match. The query parameter + must exist in the request, in the absence of which the request + match fails. + returned: success + type: str + presentMatch: + description: + - Specifies that the queryParameterMatch matches if the request + contains the query parameter, irrespective of whether the parameter + has a value or not. Only one of presentMatch, exactMatch and regexMatch + must be set. + returned: success + type: bool + regexMatch: + description: + - The queryParameterMatch matches if the value of the parameter + matches the regular expression specified by regexMatch. 
For the + regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of presentMatch, exactMatch and regexMatch must be set. + returned: success + type: str + regexMatch: + description: + - For satisfying the matchRule condition, the path of the request must + satisfy the regular expression specified in regexMatch after removing + any query parameters and anchor supplied with the original URL. For + regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. + returned: success + type: str + routeAction: + description: + - In response to a matching matchRule, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction specifies + any weightedBackendServices, service must not be set. Conversely if service + is set, routeAction cannot contain any weightedBackendServices. Only one + of routeAction or urlRedirect must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing . + returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This translates + to the Access- Control-Allow-Credentials header. Defaults to false. + returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. 
+ returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed origins. + For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + - which indicates that the CORS policy is in effect. Defaults to + false. + returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can be cached. + This translates to the content for the Access-Control-Max-Age + header. + returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic to test + the resiliency of clients to backend service failure. As part of fault + injection, when clients send requests to a backend service, delays + can be introduced by Loadbalancer on a percentage of requests before + sending those request to the backend service. Similarly requests from + clients can be aborted by the Loadbalancer for a percentage of requests. + timeout and retry_policy will be ignored by clients that are configured + with a fault_injection_policy. + returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. 
The value + must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The value + must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. Loadbalancer + does not wait for responses from the shadow service. Prior to sending + traffic to the shadow service, the host / authority header is suffixed + with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The BackendService resource being mirrored to. + returned: success + type: dict + retryPolicy: + description: + - Specifies the retry policy associated with this route. 
+ returned: success
+ type: complex
+ contains:
+ numRetries:
+ description:
+ - Specifies the allowed number of retries. This number must be > 0.
+ returned: success
+ type: int
+ perTryTimeout:
+ description:
+ - Specifies a non-zero timeout per retry attempt.
+ - If not specified, will use the timeout set in HttpRouteAction.
+ If timeout in HttpRouteAction is not set, will use the largest
+ timeout among all backend services associated with the route.
+ returned: success
+ type: complex
+ contains:
+ nanos:
+ description:
+ - Span of time that's a fraction of a second at nanosecond resolution.
+ Durations less than one second are represented with a 0 `seconds`
+ field and a positive `nanos` field. Must be from 0 to 999,999,999
+ inclusive.
+ returned: success
+ type: int
+ seconds:
+ description:
+ - Span of time at a resolution of a second. Must be from 0 to
+ 315,576,000,000 inclusive.
+ returned: success
+ type: str
+ retryConditions:
+ description:
+ - 'Specifies one or more conditions when this retry rule applies.
+ Valid values are: * 5xx: Loadbalancer will attempt a retry if
+ the backend service responds with any 5xx response code, or if
+ the backend service does not respond at all, example: disconnects,
+ reset, read timeout, connection failure, and refused streams.'
+ - "* gateway-error: Similar to 5xx, but only applies to response
+ codes 502, 503 or 504."
+ - "* connect-failure: Loadbalancer will retry on failures connecting
+ to backend services, for example due to connection timeouts."
+ - "* retriable-4xx: Loadbalancer will retry for retriable 4xx response
+ codes."
+ - Currently the only retriable error supported is 409.
+ - "* refused-stream: Loadbalancer will retry if the backend service
+ resets the stream with a REFUSED_STREAM error code. This reset
+ type indicates that it is safe to retry."
+ - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header is set + to resource-exhausted * unavailable: Loadbalancer will retry if + the gRPC status code in the response header is set to unavailable + ." + returned: success + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request is has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + returned: success + type: str + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the + request to the matched service . + returned: success + type: complex + contains: + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. The value + must be between 1 and 255 characters. + returned: success + type: str + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + The value must be between 1 and 1024 characters. 
+ returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. The weights determine the fraction of traffic that flows + to their corresponding backend service. If all traffic needs to go + to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are applied + depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default BackendService resource. Before forwarding the request + to backendService, the loadbalancer applies any relevant headerActions + specified as part of this backendServiceWeight. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need to + take effect for the selected backendService. headerAction specified + here take effect before headerAction in the enclosing HttpRouteRule, + PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the + request to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for + the header, discarding any values that were set for that + header. 
+ returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for + the header, discarding any values that were set for that + header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to the + client. + returned: success + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . The selection of a backend service is determined only for new + traffic. Once a user's request has been directed to a backendService, + subsequent requests will be sent to the same backendService as + determined by the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + urlRedirect: + description: + - When this rule is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction must + not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. 
The value must be between 1 + and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to + https. If set to false, the URL scheme of the redirected request will + remain the same as that of the request. This must only be set for + UrlMaps used in TargetHttpProxys. + - Setting this true for TargetHttpsProxy is not permitted. Defaults + to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. Only one of pathRedirect or + prefixRedirect must be specified. The value must be between 1 and + 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. Defaults to false. + returned: success + type: bool + defaultUrlRedirect: + description: + - When none of the specified hostRules match, the request is redirected to a + URL specified by defaultUrlRedirect. 
If defaultUrlRedirect is specified, defaultService + or defaultRouteAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the one + that was supplied in the request. The value must be between 1 and 255 + characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps used + in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. + The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the one + that was supplied in the request. pathRedirect cannot be supplied together + with prefixRedirect. Supply one alone or neither. If neither is supplied, + the path of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply one + alone or neither. If neither is supplied, the path of the original request + will be used for the redirect. The value must be between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. 
In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL is + removed prior to redirecting the request. If set to false, the query portion + of the original URL is retained. + returned: success + type: bool + defaultRouteAction: + description: + - defaultRouteAction takes effect when none of the pathRules or routeRules match. + The load balancer performs advanced routing actions like URL rewrites, header + transformations, etc. prior to forwarding the request to the selected backend. + If defaultRouteAction specifies any weightedBackendServices, defaultService + must not be set. + - Conversely if defaultService is set, defaultRouteAction cannot contain any + weightedBackendServices. + - Only one of defaultRouteAction or defaultUrlRedirect must be set. + returned: success + type: complex + contains: + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a route match + occurs. + - The weights determine the fraction of traffic that flows to their corresponding + backend service. + - If all traffic needs to go to a single backend service, there must be + one weightedBackendService with weight set to a non 0 number. + - Once a backendService is identified and before forwarding the request + to the backend service, advanced routing actions like Url rewrites and + header transformations are applied depending on additional settings specified + in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The full or partial URL to the default BackendService resource. Before + forwarding the request to backendService, the loadbalancer applies + any relevant headerActions specified as part of this backendServiceWeight. 
+ returned: success + type: dict + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . + - The selection of a backend service is determined only for new traffic. + Once a user's request has been directed to a backendService, subsequent + requests will be sent to the same backendService as determined by + the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + headerAction: + description: + - Specifies changes to request and response headers that need to take + effect for the selected backendService. + - headerAction specified here take effect before headerAction in the + enclosing HttpRouteRule, PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the request prior to forwarding the request to the backendService. + returned: success + type: list + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header to add. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding any + values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the response prior to sending the response back to the client. 
+ returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back + to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header to add. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding any + values that were set for that header. + returned: success + type: bool + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the request + to the matched service. + returned: success + type: complex + contains: + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, the + matching portion of the request's path is replaced by pathPrefixRewrite. + - The value must be between 1 and 1024 characters. + returned: success + type: str + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. + - The value must be between 1 and 255 characters. + returned: success + type: str + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed from + the time the request has been fully processed (i.e. end-of-stream) up + until the response has been completely processed. Timeout includes all + retries. + - If not specified, will use the largest timeout among all backend services + associated with the route. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 + hr/day * 365.25 days/year * 10000 years .' 
+ returned: success
+ type: str
+ nanos:
+ description:
+ - Span of time that's a fraction of a second at nanosecond resolution.
+ Durations less than one second are represented with a 0 seconds field
+ and a positive nanos field. Must be from 0 to 999,999,999 inclusive.
+ returned: success
+ type: int
+ retryPolicy:
+ description:
+ - Specifies the retry policy associated with this route.
+ returned: success
+ type: complex
+ contains:
+ retryConditions:
+ description:
+ - 'Specifies one or more conditions when this retry rule applies. Valid
+ values are: * 5xx: Loadbalancer will attempt a retry if the backend
+ service responds with any 5xx response code, or if the backend service
+ does not respond at all, example: disconnects, reset, read timeout,
+ * connection failure, and refused streams.'
+ - "* gateway-error: Similar to 5xx, but only applies to response codes
+ 502, 503 or 504."
+ - "* connect-failure: Loadbalancer will retry on failures connecting
+ to backend services, for example due to connection timeouts."
+ - "* retriable-4xx: Loadbalancer will retry for retriable 4xx response
+ codes."
+ - Currently the only retriable error supported is 409.
+ - "* refused-stream: Loadbalancer will retry if the backend service resets
+ the stream with a REFUSED_STREAM error code."
+ - This reset type indicates that it is safe to retry.
+ - "* cancelled: Loadbalancer will retry if the gRPC status code in the
+ response header is set to cancelled * deadline-exceeded: Loadbalancer
+ will retry if the gRPC status code in the response header is set to
+ deadline-exceeded * resource-exhausted: Loadbalancer will retry if
+ the gRPC status code in the response header is set to resource-exhausted
+ * unavailable: Loadbalancer will retry if the gRPC status code in
+ the response header is set to unavailable ."
+ returned: success
+ type: list
+ numRetries:
+ description:
+ - Specifies the allowed number of retries. This number must be > 0. If
+ not specified, defaults to 1.
+ returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + - If not specified, will use the timeout set in HttpRouteAction. If + timeout in HttpRouteAction is not set, will use the largest timeout + among all backend services associated with the route. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. + - Loadbalancer does not wait for responses from the shadow service. Prior + to sending traffic to the shadow service, the host / authority header + is suffixed with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The full or partial URL to the BackendService resource being mirrored + to. + returned: success + type: dict + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. Please + see [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) + . + returned: success + type: complex + contains: + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS requests. + - An origin is allowed if it matches either an item in allowOrigins + or an item in allowOriginRegexes. 
+ returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed origins. + For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either an item in allowOrigins + or an item in allowOriginRegexes. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + returned: success + type: list + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + returned: success + type: list + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + returned: success + type: list + maxAge: + description: + - Specifies how long results of a preflight request can be cached in + seconds. + - This translates to the Access-Control-Max-Age header. + returned: success + type: int + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. + - This translates to the Access-Control-Allow-Credentials header. + returned: success + type: bool + disabled: + description: + - If true, specifies the CORS policy is disabled. The default value + is false, which indicates that the CORS policy is in effect. + returned: success + type: bool + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic to test + the resiliency of clients to backend service failure. + - As part of fault injection, when clients send requests to a backend service, + delays can be introduced by Loadbalancer on a percentage of requests before + sending those request to the backend service. Similarly requests from + clients can be aborted by the Loadbalancer for a percentage of requests. 
+ - timeout and retryPolicy will be ignored by clients that are configured + with a faultInjectionPolicy. + returned: success + type: complex + contains: + delay: + description: + - The specification for how client requests are delayed as part of fault + injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) on + which delay will be introduced as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + abort: + description: + - The specification for how client requests are aborted as part of fault + injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. + - The value must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) which + will be aborted as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str +tests: + description: + - The list of expected URL mapping tests. Request to update this UrlMap will succeed + only if all of the test cases pass. 
You can specify a maximum of 100 tests per + UrlMap. + returned: success + type: complex + contains: + description: + description: + - Description of this test case. + returned: success + type: str + host: + description: + - Host portion of the URL. + returned: success + type: str + path: + description: + - Path portion of the URL. + returned: success + type: str + service: + description: + - Expected BackendService resource the given URL should be mapped to. + returned: success + type: dict +defaultUrlRedirect: + description: + - When none of the specified hostRules match, the request is redirected to a URL + specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService + or defaultRouteAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the one that + was supplied in the request. The value must be between 1 and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain the + same as that of the request. This must only be set for UrlMaps used in TargetHttpProxys. + Setting this true for TargetHttpsProxy is not permitted. The default is set + to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the one that + was supplied in the request. pathRedirect cannot be supplied together with + prefixRedirect. Supply one alone or neither. If neither is supplied, the path + of the original request will be used for the redirect. The value must be between + 1 and 1024 characters. 
+ returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply one alone + or neither. If neither is supplied, the path of the original request will + be used for the redirect. The value must be between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values are: + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to + 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL is removed + prior to redirecting the request. If set to false, the query portion of the + original URL is retained. The default is set to false. + returned: success + type: bool +defaultRouteAction: + description: + - defaultRouteAction takes effect when none of the hostRules match. The load balancer + performs advanced routing actions like URL rewrites, header transformations, etc. + prior to forwarding the request to the selected backend. + - If defaultRouteAction specifies any weightedBackendServices, defaultService must + not be set. Conversely if defaultService is set, defaultRouteAction cannot contain + any weightedBackendServices. + - Only one of defaultRouteAction or defaultUrlRedirect must be set. 
+ returned: success + type: complex + contains: + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a route match + occurs. + - The weights determine the fraction of traffic that flows to their corresponding + backend service. + - If all traffic needs to go to a single backend service, there must be one + weightedBackendService with weight set to a non 0 number. + - Once a backendService is identified and before forwarding the request to the + backend service, advanced routing actions like Url rewrites and header transformations + are applied depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The full or partial URL to the default BackendService resource. Before + forwarding the request to backendService, the loadbalancer applies any + relevant headerActions specified as part of this backendServiceWeight. + returned: success + type: dict + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed as + weight / (sum of all weightedBackendService weights in routeAction) . + - The selection of a backend service is determined only for new traffic. + Once a user's request has been directed to a backendService, subsequent + requests will be sent to the same backendService as determined by the + BackendService's session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + headerAction: + description: + - Specifies changes to request and response headers that need to take effect + for the selected backendService. + - headerAction specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. 
+ returned: success + type: complex + contains: + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + request prior to forwarding the request to the backendService. + returned: success + type: list + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header to add. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. + - If true, headerValue is set for the header, discarding any values + that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + response prior to sending the response back to the client. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back to + the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header to add. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. + - If true, headerValue is set for the header, discarding any values + that were set for that header. + returned: success + type: bool + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the request + to the matched service. 
+ returned: success + type: complex + contains: + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, the matching + portion of the request's path is replaced by pathPrefixRewrite. + - The value must be between 1 and 1024 characters. + returned: success + type: str + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. + - The value must be between 1 and 255 characters. + returned: success + type: str + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed from the + time the request has been fully processed (i.e. end-of-stream) up until the + response has been completely processed. Timeout includes all retries. + - If not specified, will use the largest timeout among all backend services + associated with the route. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 seconds field and a positive + nanos field. Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + retryConditions: + description: + - 'Specfies one or more conditions when this retry rule applies. 
Valid values
+ are: * 5xx: Loadbalancer will attempt a retry if the backend service responds
+ with any 5xx response code, or if the backend service does not respond
+ at all, example: disconnects, reset, read timeout, * connection failure,
+ and refused streams.'
+ - "* gateway-error: Similar to 5xx, but only applies to response codes 502,
+ 503 or 504."
+ - "* connect-failure: Loadbalancer will retry on failures connecting to
+ backend services, for example due to connection timeouts."
+ - "* retriable-4xx: Loadbalancer will retry for retriable 4xx response codes."
+ - Currently the only retriable error supported is 409.
+ - "* refused-stream: Loadbalancer will retry if the backend service resets
+ the stream with a REFUSED_STREAM error code."
+ - This reset type indicates that it is safe to retry.
+ - "* cancelled: Loadbalancer will retry if the gRPC status code in the response
+ header is set to cancelled * deadline-exceeded: Loadbalancer will retry
+ if the gRPC status code in the response header is set to deadline-exceeded
+ * resource-exhausted: Loadbalancer will retry if the gRPC status code
+ in the response header is set to resource-exhausted * unavailable: Loadbalancer
+ will retry if the gRPC status code in the response header is set to unavailable
+ ."
+ returned: success
+ type: list
+ numRetries:
+ description:
+ - Specifies the allowed number of retries. This number must be > 0. If not
+ specified, defaults to 1.
+ returned: success
+ type: int
+ perTryTimeout:
+ description:
+ - Specifies a non-zero timeout per retry attempt.
+ - If not specified, will use the timeout set in HttpRouteAction. If timeout
+ in HttpRouteAction is not set, will use the largest timeout among all
+ backend services associated with the route.
+ returned: success
+ type: complex
+ contains:
+ seconds:
+ description:
+ - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000
+ inclusive.
+ - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 + hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds field + and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's backends are + shadowed to a separate mirrored backend service. + - Loadbalancer does not wait for responses from the shadow service. Prior to + sending traffic to the shadow service, the host / authority header is suffixed + with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The full or partial URL to the BackendService resource being mirrored + to. + returned: success + type: dict + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. Please see + [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) + . + returned: success + type: complex + contains: + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS requests. + - An origin is allowed if it matches either an item in allowOrigins or an + item in allowOriginRegexes. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed origins. + For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either an item in allowOrigins or an + item in allowOriginRegexes. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. 
+ returned: success + type: list + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + returned: success + type: list + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + returned: success + type: list + maxAge: + description: + - Specifies how long results of a preflight request can be cached in seconds. + - This translates to the Access-Control-Max-Age header. + returned: success + type: int + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates that + the actual request can include user credentials. + - This translates to the Access-Control-Allow-Credentials header. + returned: success + type: bool + disabled: + description: + - If true, specifies the CORS policy is disabled. The default value is false, + which indicates that the CORS policy is in effect. + returned: success + type: bool + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic to test the + resiliency of clients to backend service failure. + - As part of fault injection, when clients send requests to a backend service, + delays can be introduced by Loadbalancer on a percentage of requests before + sending those request to the backend service. Similarly requests from clients + can be aborted by the Loadbalancer for a percentage of requests. + - timeout and retryPolicy will be ignored by clients that are configured with + a faultInjectionPolicy. + returned: success + type: complex + contains: + delay: + description: + - The specification for how client requests are delayed as part of fault + injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. 
Must be from 0 to 315,576,000,000 + inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) on which + delay will be introduced as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + abort: + description: + - The specification for how client requests are aborted as part of fault + injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. + - The value must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) which + will be aborted as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + default_service=dict(type='dict'), + description=dict(type='str'), + fingerprint=dict(type='str'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), header_value=dict(required=True, type='str'), replace=dict(required=True, type='bool') + ), + ), + request_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), header_value=dict(required=True, type='str'), replace=dict(required=True, type='bool') + ), + ), + response_headers_to_remove=dict(type='list', elements='str'), + ), + ), + host_rules=dict( + type='list', + elements='dict', + options=dict( + description=dict(type='str'), hosts=dict(required=True, type='list', elements='str'), path_matcher=dict(required=True, type='str') + ), + ), + name=dict(required=True, type='str'), + path_matchers=dict( + type='list', + elements='dict', + options=dict( + default_service=dict(type='dict'), + description=dict(type='str'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_add=dict( + 
type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + request_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + response_headers_to_remove=dict(type='list', elements='str'), + ), + ), + name=dict(required=True, type='str'), + path_rules=dict( + type='list', + elements='dict', + options=dict( + service=dict(type='dict'), + paths=dict(required=True, type='list', elements='str'), + route_action=dict( + type='dict', + options=dict( + cors_policy=dict( + type='dict', + options=dict( + allow_credentials=dict(type='bool'), + allow_headers=dict(type='list', elements='str'), + allow_methods=dict(type='list', elements='str'), + allow_origin_regexes=dict(type='list', elements='str'), + allow_origins=dict(type='list', elements='str'), + disabled=dict(required=True, type='bool'), + expose_headers=dict(type='list', elements='str'), + max_age=dict(type='int'), + ), + ), + fault_injection_policy=dict( + type='dict', + options=dict( + abort=dict( + type='dict', + options=dict(http_status=dict(required=True, type='int'), percentage=dict(required=True, type='str')), + ), + delay=dict( + type='dict', + options=dict( + fixed_delay=dict( + required=True, + type='dict', + options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str')), + ), + percentage=dict(required=True, type='str'), + ), + ), + ), + ), + request_mirror_policy=dict(type='dict', options=dict(backend_service=dict(required=True, type='dict'))), + retry_policy=dict( + type='dict', + options=dict( + num_retries=dict(type='int'), + per_try_timeout=dict(type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, 
type='str'))), + retry_conditions=dict(type='list', elements='str'), + ), + ), + timeout=dict(type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str'))), + url_rewrite=dict(type='dict', options=dict(host_rewrite=dict(type='str'), path_prefix_rewrite=dict(type='str'))), + weighted_backend_services=dict( + type='list', + elements='dict', + options=dict( + backend_service=dict(required=True, type='dict'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + request_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + response_headers_to_remove=dict(type='list', elements='str'), + ), + ), + weight=dict(required=True, type='int'), + ), + ), + ), + ), + url_redirect=dict( + type='dict', + options=dict( + host_redirect=dict(type='str'), + https_redirect=dict(type='bool'), + path_redirect=dict(type='str'), + prefix_redirect=dict(type='str'), + redirect_response_code=dict(type='str'), + strip_query=dict(type='bool'), + ), + ), + ), + ), + route_rules=dict( + type='list', + elements='dict', + options=dict( + priority=dict(required=True, type='int'), + service=dict(type='dict'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + request_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + 
options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + response_headers_to_remove=dict(type='list', elements='str'), + ), + ), + match_rules=dict( + type='list', + elements='dict', + options=dict( + full_path_match=dict(type='str'), + header_matches=dict( + type='list', + elements='dict', + options=dict( + exact_match=dict(type='str'), + header_name=dict(required=True, type='str'), + invert_match=dict(type='bool'), + prefix_match=dict(type='str'), + present_match=dict(type='bool'), + range_match=dict( + type='dict', + options=dict(range_end=dict(required=True, type='int'), range_start=dict(required=True, type='int')), + ), + regex_match=dict(type='str'), + suffix_match=dict(type='str'), + ), + ), + ignore_case=dict(type='bool'), + metadata_filters=dict( + type='list', + elements='dict', + options=dict( + filter_labels=dict( + required=True, + type='list', + elements='dict', + options=dict(name=dict(required=True, type='str'), value=dict(required=True, type='str')), + ), + filter_match_criteria=dict(required=True, type='str'), + ), + ), + prefix_match=dict(type='str'), + query_parameter_matches=dict( + type='list', + elements='dict', + options=dict( + exact_match=dict(type='str'), + name=dict(required=True, type='str'), + present_match=dict(type='bool'), + regex_match=dict(type='str'), + ), + ), + regex_match=dict(type='str'), + ), + ), + route_action=dict( + type='dict', + options=dict( + cors_policy=dict( + type='dict', + options=dict( + allow_credentials=dict(type='bool'), + allow_headers=dict(type='list', elements='str'), + allow_methods=dict(type='list', elements='str'), + allow_origin_regexes=dict(type='list', elements='str'), + allow_origins=dict(type='list', elements='str'), + disabled=dict(type='bool'), + expose_headers=dict(type='list', elements='str'), + max_age=dict(type='int'), + ), + ), + fault_injection_policy=dict( + type='dict', + 
options=dict( + abort=dict(type='dict', options=dict(http_status=dict(type='int'), percentage=dict(type='str'))), + delay=dict( + type='dict', + options=dict( + fixed_delay=dict( + type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str')) + ), + percentage=dict(type='str'), + ), + ), + ), + ), + request_mirror_policy=dict(type='dict', options=dict(backend_service=dict(required=True, type='dict'))), + retry_policy=dict( + type='dict', + options=dict( + num_retries=dict(required=True, type='int'), + per_try_timeout=dict(type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str'))), + retry_conditions=dict(type='list', elements='str'), + ), + ), + timeout=dict(type='dict', options=dict(nanos=dict(type='int'), seconds=dict(required=True, type='str'))), + url_rewrite=dict(type='dict', options=dict(host_rewrite=dict(type='str'), path_prefix_rewrite=dict(type='str'))), + weighted_backend_services=dict( + type='list', + elements='dict', + options=dict( + backend_service=dict(required=True, type='dict'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + request_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + options=dict( + header_name=dict(required=True, type='str'), + header_value=dict(required=True, type='str'), + replace=dict(required=True, type='bool'), + ), + ), + response_headers_to_remove=dict(type='list', elements='str'), + ), + ), + weight=dict(required=True, type='int'), + ), + ), + ), + ), + url_redirect=dict( + type='dict', + options=dict( + host_redirect=dict(type='str'), + https_redirect=dict(type='bool'), + path_redirect=dict(type='str'), + prefix_redirect=dict(type='str'), + 
redirect_response_code=dict(type='str'), + strip_query=dict(type='bool'), + ), + ), + ), + ), + default_url_redirect=dict( + type='dict', + options=dict( + host_redirect=dict(type='str'), + https_redirect=dict(type='bool'), + path_redirect=dict(type='str'), + prefix_redirect=dict(type='str'), + redirect_response_code=dict(type='str'), + strip_query=dict(type='bool'), + ), + ), + default_route_action=dict( + type='dict', + options=dict( + weighted_backend_services=dict( + type='list', + elements='dict', + options=dict( + backend_service=dict(type='dict'), + weight=dict(type='int'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_remove=dict(type='list', elements='str'), + request_headers_to_add=dict( + type='list', + elements='dict', + options=dict(header_name=dict(type='str'), header_value=dict(type='str'), replace=dict(type='bool')), + ), + response_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + options=dict(header_name=dict(type='str'), header_value=dict(type='str'), replace=dict(type='bool')), + ), + ), + ), + ), + ), + url_rewrite=dict(type='dict', options=dict(path_prefix_rewrite=dict(type='str'), host_rewrite=dict(type='str'))), + timeout=dict(type='dict', options=dict(seconds=dict(type='str'), nanos=dict(type='int'))), + retry_policy=dict( + type='dict', + options=dict( + retry_conditions=dict(type='list', elements='str'), + num_retries=dict(default=1, type='int'), + per_try_timeout=dict(type='dict', options=dict(seconds=dict(type='str'), nanos=dict(type='int'))), + ), + ), + request_mirror_policy=dict(type='dict', options=dict(backend_service=dict(required=True, type='dict'))), + cors_policy=dict( + type='dict', + options=dict( + allow_origins=dict(type='list', elements='str'), + allow_origin_regexes=dict(type='list', elements='str'), + allow_methods=dict(type='list', elements='str'), + allow_headers=dict(type='list', elements='str'), + 
expose_headers=dict(type='list', elements='str'), + max_age=dict(type='int'), + allow_credentials=dict(type='bool'), + disabled=dict(type='bool'), + ), + ), + fault_injection_policy=dict( + type='dict', + options=dict( + delay=dict( + type='dict', + options=dict( + fixed_delay=dict(type='dict', options=dict(seconds=dict(type='str'), nanos=dict(type='int'))), + percentage=dict(type='str'), + ), + ), + abort=dict(type='dict', options=dict(http_status=dict(type='int'), percentage=dict(type='str'))), + ), + ), + ), + ), + ), + ), + tests=dict( + type='list', + elements='dict', + options=dict( + description=dict(type='str'), + host=dict(required=True, type='str'), + path=dict(required=True, type='str'), + service=dict(required=True, type='dict'), + ), + ), + default_url_redirect=dict( + type='dict', + options=dict( + host_redirect=dict(type='str'), + https_redirect=dict(type='bool'), + path_redirect=dict(type='str'), + prefix_redirect=dict(type='str'), + redirect_response_code=dict(type='str'), + strip_query=dict(type='bool'), + ), + ), + default_route_action=dict( + type='dict', + options=dict( + weighted_backend_services=dict( + type='list', + elements='dict', + options=dict( + backend_service=dict(type='dict'), + weight=dict(type='int'), + header_action=dict( + type='dict', + options=dict( + request_headers_to_remove=dict(type='list', elements='str'), + request_headers_to_add=dict( + type='list', + elements='dict', + options=dict(header_name=dict(type='str'), header_value=dict(type='str'), replace=dict(type='bool')), + ), + response_headers_to_remove=dict(type='list', elements='str'), + response_headers_to_add=dict( + type='list', + elements='dict', + options=dict(header_name=dict(type='str'), header_value=dict(type='str'), replace=dict(type='bool')), + ), + ), + ), + ), + ), + url_rewrite=dict(type='dict', options=dict(path_prefix_rewrite=dict(type='str'), host_rewrite=dict(type='str'))), + timeout=dict(type='dict', options=dict(seconds=dict(type='str'), 
nanos=dict(type='int'))), + retry_policy=dict( + type='dict', + options=dict( + retry_conditions=dict(type='list', elements='str'), + num_retries=dict(default=1, type='int'), + per_try_timeout=dict(type='dict', options=dict(seconds=dict(type='str'), nanos=dict(type='int'))), + ), + ), + request_mirror_policy=dict(type='dict', options=dict(backend_service=dict(required=True, type='dict'))), + cors_policy=dict( + type='dict', + options=dict( + allow_origins=dict(type='list', elements='str'), + allow_origin_regexes=dict(type='list', elements='str'), + allow_methods=dict(type='list', elements='str'), + allow_headers=dict(type='list', elements='str'), + expose_headers=dict(type='list', elements='str'), + max_age=dict(type='int'), + allow_credentials=dict(type='bool'), + disabled=dict(type='bool'), + ), + ), + fault_injection_policy=dict( + type='dict', + options=dict( + delay=dict( + type='dict', + options=dict( + fixed_delay=dict(type='dict', options=dict(seconds=dict(type='str'), nanos=dict(type='int'))), percentage=dict(type='str') + ), + ), + abort=dict(type='dict', options=dict(http_status=dict(type='int'), percentage=dict(type='str'))), + ), + ), + ), + ), + ), + mutually_exclusive=[['default_route_action', 'default_url_redirect']], + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#urlMap' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + module.params['fingerprint'] = fetch['fingerprint'] + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + 
module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#urlMap', + u'defaultService': replace_resource_dict(module.params.get(u'default_service', {}), 'selfLink'), + u'description': module.params.get('description'), + u'headerAction': UrlMapHeaderaction(module.params.get('header_action', {}), module).to_request(), + u'hostRules': UrlMapHostrulesArray(module.params.get('host_rules', []), module).to_request(), + u'name': module.params.get('name'), + u'pathMatchers': UrlMapPathmatchersArray(module.params.get('path_matchers', []), module).to_request(), + u'tests': UrlMapTestsArray(module.params.get('tests', []), module).to_request(), + u'defaultUrlRedirect': UrlMapDefaulturlredirect(module.params.get('default_url_redirect', {}), module).to_request(), + u'defaultRouteAction': UrlMapDefaultrouteaction(module.params.get('default_route_action', {}), module).to_request(), + u'fingerprint': module.params.get('fingerprint') + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/urlMaps/{name}".format(**module.params) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/urlMaps".format(**module.params) 
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an HTTP response into a dict, or return None for 404/204."""
    status = response.status_code
    # Not-found (when tolerated) and no-content both mean "no resource".
    if status == 204 or (allow_not_found and status == 404):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    # A 2xx body can still carry an embedded error payload.
    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return result


def is_different(module, response):
    """Return True when the desired state differs from the live resource."""
    request = resource_to_request(module)
    remote = response_to_hash(module, response)

    # Compare only keys present on both sides (strips output-only fields).
    remote_vals = {k: v for k, v in remote.items() if k in request}
    request_vals = {k: v for k, v in request.items() if k in remote}

    return GcpRequest(request_vals) != GcpRequest(remote_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response): + return { + u'creationTimestamp': response.get(u'creationTimestamp'), + u'defaultService': response.get(u'defaultService'), + u'description': response.get(u'description'), + u'id': response.get(u'id'), + u'fingerprint': response.get(u'fingerprint'), + u'headerAction': UrlMapHeaderaction(response.get(u'headerAction', {}), module).from_response(), + u'hostRules': UrlMapHostrulesArray(response.get(u'hostRules', []), module).from_response(), + u'name': module.params.get('name'), + u'pathMatchers': UrlMapPathmatchersArray(response.get(u'pathMatchers', []), module).from_response(), + u'tests': UrlMapTestsArray(response.get(u'tests', []), module).from_response(), + u'defaultUrlRedirect': UrlMapDefaulturlredirect(response.get(u'defaultUrlRedirect', {}), module).from_response(), + u'defaultRouteAction': UrlMapDefaultrouteaction(response.get(u'defaultRouteAction', {}), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#urlMap') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def 
def raise_if_errors(response, err_path, module):
    """Abort the module run if the polled operation carries an error block."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


class UrlMapHeaderaction(object):
    """Maps a headerAction block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(data.get('request_headers_to_add', []), self.module).to_request(),
            u'requestHeadersToRemove': data.get('request_headers_to_remove'),
            u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(data.get('response_headers_to_add', []), self.module).to_request(),
            u'responseHeadersToRemove': data.get('response_headers_to_remove'),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(data.get(u'requestHeadersToAdd', []), self.module).from_response(),
            u'requestHeadersToRemove': data.get(u'requestHeadersToRemove'),
            u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(data.get(u'responseHeadersToAdd', []), self.module).from_response(),
            u'responseHeadersToRemove': data.get(u'responseHeadersToRemove'),
        })


class UrlMapRequestheaderstoaddArray(object):
    """Maps requestHeadersToAdd entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')})


class UrlMapResponseheaderstoaddArray(object):
    """Maps responseHeadersToAdd entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')})


class UrlMapHostrulesArray(object):
    """Maps hostRules entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'description': entry.get('description'), u'hosts': entry.get('hosts'), u'pathMatcher': entry.get('path_matcher')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'description': entry.get(u'description'), u'hosts': entry.get(u'hosts'), u'pathMatcher': entry.get(u'pathMatcher')})
class UrlMapPathmatchersArray(object):
    """Maps pathMatchers entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({
            u'defaultService': replace_resource_dict(entry.get(u'default_service', {}), 'selfLink'),
            u'description': entry.get('description'),
            u'headerAction': UrlMapHeaderaction(entry.get('header_action', {}), self.module).to_request(),
            u'name': entry.get('name'),
            u'pathRules': UrlMapPathrulesArray(entry.get('path_rules', []), self.module).to_request(),
            u'routeRules': UrlMapRouterulesArray(entry.get('route_rules', []), self.module).to_request(),
            u'defaultUrlRedirect': UrlMapDefaulturlredirect(entry.get('default_url_redirect', {}), self.module).to_request(),
            u'defaultRouteAction': UrlMapDefaultrouteaction(entry.get('default_route_action', {}), self.module).to_request(),
        })

    def _response_from_item(self, entry):
        return remove_nones_from_dict({
            u'defaultService': entry.get(u'defaultService'),
            u'description': entry.get(u'description'),
            u'headerAction': UrlMapHeaderaction(entry.get(u'headerAction', {}), self.module).from_response(),
            u'name': entry.get(u'name'),
            u'pathRules': UrlMapPathrulesArray(entry.get(u'pathRules', []), self.module).from_response(),
            u'routeRules': UrlMapRouterulesArray(entry.get(u'routeRules', []), self.module).from_response(),
            u'defaultUrlRedirect': UrlMapDefaulturlredirect(entry.get(u'defaultUrlRedirect', {}), self.module).from_response(),
            u'defaultRouteAction': UrlMapDefaultrouteaction(entry.get(u'defaultRouteAction', {}), self.module).from_response(),
        })


# NOTE: the following three classes are duplicates emitted by the upstream code
# generator; the later identical definitions harmlessly shadow the earlier ones.
class UrlMapHeaderaction(object):
    """Maps a headerAction block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(data.get('request_headers_to_add', []), self.module).to_request(),
            u'requestHeadersToRemove': data.get('request_headers_to_remove'),
            u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(data.get('response_headers_to_add', []), self.module).to_request(),
            u'responseHeadersToRemove': data.get('response_headers_to_remove'),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(data.get(u'requestHeadersToAdd', []), self.module).from_response(),
            u'requestHeadersToRemove': data.get(u'requestHeadersToRemove'),
            u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(data.get(u'responseHeadersToAdd', []), self.module).from_response(),
            u'responseHeadersToRemove': data.get(u'responseHeadersToRemove'),
        })


class UrlMapRequestheaderstoaddArray(object):
    """Maps requestHeadersToAdd entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')})


class UrlMapResponseheaderstoaddArray(object):
    """Maps responseHeadersToAdd entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')})


class UrlMapPathrulesArray(object):
    """Maps pathRules entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({
            u'service': replace_resource_dict(entry.get(u'service', {}), 'selfLink'),
            u'paths': entry.get('paths'),
            u'routeAction': UrlMapRouteaction(entry.get('route_action', {}), self.module).to_request(),
            u'urlRedirect': UrlMapUrlredirect(entry.get('url_redirect', {}), self.module).to_request(),
        })

    def _response_from_item(self, entry):
        return remove_nones_from_dict({
            u'service': entry.get(u'service'),
            u'paths': entry.get(u'paths'),
            u'routeAction': UrlMapRouteaction(entry.get(u'routeAction', {}), self.module).from_response(),
            u'urlRedirect': UrlMapUrlredirect(entry.get(u'urlRedirect', {}), self.module).from_response(),
        })
class UrlMapRouteaction(object):
    """Maps a routeAction block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'corsPolicy': UrlMapCorspolicy(data.get('cors_policy', {}), self.module).to_request(),
            u'faultInjectionPolicy': UrlMapFaultinjectionpolicy(data.get('fault_injection_policy', {}), self.module).to_request(),
            u'requestMirrorPolicy': UrlMapRequestmirrorpolicy(data.get('request_mirror_policy', {}), self.module).to_request(),
            u'retryPolicy': UrlMapRetrypolicy(data.get('retry_policy', {}), self.module).to_request(),
            u'timeout': UrlMapTimeout(data.get('timeout', {}), self.module).to_request(),
            u'urlRewrite': UrlMapUrlrewrite(data.get('url_rewrite', {}), self.module).to_request(),
            u'weightedBackendServices': UrlMapWeightedbackendservicesArray(data.get('weighted_backend_services', []), self.module).to_request(),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'corsPolicy': UrlMapCorspolicy(data.get(u'corsPolicy', {}), self.module).from_response(),
            u'faultInjectionPolicy': UrlMapFaultinjectionpolicy(data.get(u'faultInjectionPolicy', {}), self.module).from_response(),
            u'requestMirrorPolicy': UrlMapRequestmirrorpolicy(data.get(u'requestMirrorPolicy', {}), self.module).from_response(),
            u'retryPolicy': UrlMapRetrypolicy(data.get(u'retryPolicy', {}), self.module).from_response(),
            u'timeout': UrlMapTimeout(data.get(u'timeout', {}), self.module).from_response(),
            u'urlRewrite': UrlMapUrlrewrite(data.get(u'urlRewrite', {}), self.module).from_response(),
            u'weightedBackendServices': UrlMapWeightedbackendservicesArray(data.get(u'weightedBackendServices', []), self.module).from_response(),
        })


class UrlMapCorspolicy(object):
    """Maps a corsPolicy block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'allowCredentials': data.get('allow_credentials'),
            u'allowHeaders': data.get('allow_headers'),
            u'allowMethods': data.get('allow_methods'),
            u'allowOriginRegexes': data.get('allow_origin_regexes'),
            u'allowOrigins': data.get('allow_origins'),
            u'disabled': data.get('disabled'),
            u'exposeHeaders': data.get('expose_headers'),
            u'maxAge': data.get('max_age'),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'allowCredentials': data.get(u'allowCredentials'),
            u'allowHeaders': data.get(u'allowHeaders'),
            u'allowMethods': data.get(u'allowMethods'),
            u'allowOriginRegexes': data.get(u'allowOriginRegexes'),
            u'allowOrigins': data.get(u'allowOrigins'),
            u'disabled': data.get(u'disabled'),
            u'exposeHeaders': data.get(u'exposeHeaders'),
            u'maxAge': data.get(u'maxAge'),
        })


class UrlMapFaultinjectionpolicy(object):
    """Maps a faultInjectionPolicy block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'abort': UrlMapAbort(data.get('abort', {}), self.module).to_request(),
            u'delay': UrlMapDelay(data.get('delay', {}), self.module).to_request(),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'abort': UrlMapAbort(data.get(u'abort', {}), self.module).from_response(),
            u'delay': UrlMapDelay(data.get(u'delay', {}), self.module).from_response(),
        })


class UrlMapAbort(object):
    """Maps an abort fault-injection block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({u'httpStatus': data.get('http_status'), u'percentage': data.get('percentage')})

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({u'httpStatus': data.get(u'httpStatus'), u'percentage': data.get(u'percentage')})
class UrlMapDelay(object):
    """Maps a delay fault-injection block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict(
            {u'fixedDelay': UrlMapFixeddelay(data.get('fixed_delay', {}), self.module).to_request(), u'percentage': data.get('percentage')}
        )

    def from_response(self):
        data = self.request
        return remove_nones_from_dict(
            {u'fixedDelay': UrlMapFixeddelay(data.get(u'fixedDelay', {}), self.module).from_response(), u'percentage': data.get(u'percentage')}
        )


class UrlMapFixeddelay(object):
    """Maps a fixedDelay duration between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({u'nanos': data.get('nanos'), u'seconds': data.get('seconds')})

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({u'nanos': data.get(u'nanos'), u'seconds': data.get(u'seconds')})


class UrlMapRequestmirrorpolicy(object):
    """Maps a requestMirrorPolicy block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({u'backendService': replace_resource_dict(self.request.get(u'backend_service', {}), 'selfLink')})

    def from_response(self):
        return remove_nones_from_dict({u'backendService': self.request.get(u'backendService')})


class UrlMapRetrypolicy(object):
    """Maps a retryPolicy block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'numRetries': data.get('num_retries'),
            u'perTryTimeout': UrlMapPertrytimeout(data.get('per_try_timeout', {}), self.module).to_request(),
            u'retryConditions': data.get('retry_conditions'),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'numRetries': data.get(u'numRetries'),
            u'perTryTimeout': UrlMapPertrytimeout(data.get(u'perTryTimeout', {}), self.module).from_response(),
            u'retryConditions': data.get(u'retryConditions'),
        })


class UrlMapPertrytimeout(object):
    """Maps a perTryTimeout duration between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({u'nanos': data.get('nanos'), u'seconds': data.get('seconds')})

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({u'nanos': data.get(u'nanos'), u'seconds': data.get(u'seconds')})


class UrlMapTimeout(object):
    """Maps a routeAction timeout duration between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({u'nanos': data.get('nanos'), u'seconds': data.get('seconds')})

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({u'nanos': data.get(u'nanos'), u'seconds': data.get(u'seconds')})


class UrlMapUrlrewrite(object):
    """Maps a urlRewrite block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({u'hostRewrite': data.get('host_rewrite'), u'pathPrefixRewrite': data.get('path_prefix_rewrite')})

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({u'hostRewrite': data.get(u'hostRewrite'), u'pathPrefixRewrite': data.get(u'pathPrefixRewrite')})
class UrlMapWeightedbackendservicesArray(object):
    """Maps weightedBackendServices entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({
            u'backendService': replace_resource_dict(entry.get(u'backend_service', {}), 'selfLink'),
            u'headerAction': UrlMapHeaderaction(entry.get('header_action', {}), self.module).to_request(),
            u'weight': entry.get('weight'),
        })

    def _response_from_item(self, entry):
        return remove_nones_from_dict({
            u'backendService': entry.get(u'backendService'),
            u'headerAction': UrlMapHeaderaction(entry.get(u'headerAction', {}), self.module).from_response(),
            u'weight': entry.get(u'weight'),
        })


# NOTE: the following three classes are duplicates emitted by the upstream code
# generator; the later identical definitions harmlessly shadow the earlier ones.
class UrlMapHeaderaction(object):
    """Maps a headerAction block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(data.get('request_headers_to_add', []), self.module).to_request(),
            u'requestHeadersToRemove': data.get('request_headers_to_remove'),
            u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(data.get('response_headers_to_add', []), self.module).to_request(),
            u'responseHeadersToRemove': data.get('response_headers_to_remove'),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(data.get(u'requestHeadersToAdd', []), self.module).from_response(),
            u'requestHeadersToRemove': data.get(u'requestHeadersToRemove'),
            u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(data.get(u'responseHeadersToAdd', []), self.module).from_response(),
            u'responseHeadersToRemove': data.get(u'responseHeadersToRemove'),
        })


class UrlMapRequestheaderstoaddArray(object):
    """Maps requestHeadersToAdd entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')})


class UrlMapResponseheaderstoaddArray(object):
    """Maps responseHeadersToAdd entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')})
class UrlMapUrlredirect(object):
    """Maps a urlRedirect block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'hostRedirect': data.get('host_redirect'),
            u'httpsRedirect': data.get('https_redirect'),
            u'pathRedirect': data.get('path_redirect'),
            u'prefixRedirect': data.get('prefix_redirect'),
            u'redirectResponseCode': data.get('redirect_response_code'),
            u'stripQuery': data.get('strip_query'),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'hostRedirect': data.get(u'hostRedirect'),
            u'httpsRedirect': data.get(u'httpsRedirect'),
            u'pathRedirect': data.get(u'pathRedirect'),
            u'prefixRedirect': data.get(u'prefixRedirect'),
            u'redirectResponseCode': data.get(u'redirectResponseCode'),
            u'stripQuery': data.get(u'stripQuery'),
        })


class UrlMapRouterulesArray(object):
    """Maps routeRules entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({
            u'priority': entry.get('priority'),
            u'service': replace_resource_dict(entry.get(u'service', {}), 'selfLink'),
            u'headerAction': UrlMapHeaderaction(entry.get('header_action', {}), self.module).to_request(),
            u'matchRules': UrlMapMatchrulesArray(entry.get('match_rules', []), self.module).to_request(),
            u'routeAction': UrlMapRouteaction(entry.get('route_action', {}), self.module).to_request(),
            u'urlRedirect': UrlMapUrlredirect(entry.get('url_redirect', {}), self.module).to_request(),
        })

    def _response_from_item(self, entry):
        return remove_nones_from_dict({
            u'priority': entry.get(u'priority'),
            u'service': entry.get(u'service'),
            u'headerAction': UrlMapHeaderaction(entry.get(u'headerAction', {}), self.module).from_response(),
            u'matchRules': UrlMapMatchrulesArray(entry.get(u'matchRules', []), self.module).from_response(),
            u'routeAction': UrlMapRouteaction(entry.get(u'routeAction', {}), self.module).from_response(),
            u'urlRedirect': UrlMapUrlredirect(entry.get(u'urlRedirect', {}), self.module).from_response(),
        })


# NOTE: the following three classes are duplicates emitted by the upstream code
# generator; the later identical definitions harmlessly shadow the earlier ones.
class UrlMapHeaderaction(object):
    """Maps a headerAction block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(data.get('request_headers_to_add', []), self.module).to_request(),
            u'requestHeadersToRemove': data.get('request_headers_to_remove'),
            u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(data.get('response_headers_to_add', []), self.module).to_request(),
            u'responseHeadersToRemove': data.get('response_headers_to_remove'),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(data.get(u'requestHeadersToAdd', []), self.module).from_response(),
            u'requestHeadersToRemove': data.get(u'requestHeadersToRemove'),
            u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(data.get(u'responseHeadersToAdd', []), self.module).from_response(),
            u'responseHeadersToRemove': data.get(u'responseHeadersToRemove'),
        })


class UrlMapRequestheaderstoaddArray(object):
    """Maps requestHeadersToAdd entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')})


class UrlMapResponseheaderstoaddArray(object):
    """Maps responseHeadersToAdd entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get('header_name'), u'headerValue': entry.get('header_value'), u'replace': entry.get('replace')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'headerName': entry.get(u'headerName'), u'headerValue': entry.get(u'headerValue'), u'replace': entry.get(u'replace')})
class UrlMapMatchrulesArray(object):
    """Maps matchRules entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({
            u'fullPathMatch': entry.get('full_path_match'),
            u'headerMatches': UrlMapHeadermatchesArray(entry.get('header_matches', []), self.module).to_request(),
            u'ignoreCase': entry.get('ignore_case'),
            u'metadataFilters': UrlMapMetadatafiltersArray(entry.get('metadata_filters', []), self.module).to_request(),
            u'prefixMatch': entry.get('prefix_match'),
            u'queryParameterMatches': UrlMapQueryparametermatchesArray(entry.get('query_parameter_matches', []), self.module).to_request(),
            u'regexMatch': entry.get('regex_match'),
        })

    def _response_from_item(self, entry):
        return remove_nones_from_dict({
            u'fullPathMatch': entry.get(u'fullPathMatch'),
            u'headerMatches': UrlMapHeadermatchesArray(entry.get(u'headerMatches', []), self.module).from_response(),
            u'ignoreCase': entry.get(u'ignoreCase'),
            u'metadataFilters': UrlMapMetadatafiltersArray(entry.get(u'metadataFilters', []), self.module).from_response(),
            u'prefixMatch': entry.get(u'prefixMatch'),
            u'queryParameterMatches': UrlMapQueryparametermatchesArray(entry.get(u'queryParameterMatches', []), self.module).from_response(),
            u'regexMatch': entry.get(u'regexMatch'),
        })


class UrlMapHeadermatchesArray(object):
    """Maps headerMatches entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({
            u'exactMatch': entry.get('exact_match'),
            u'headerName': entry.get('header_name'),
            u'invertMatch': entry.get('invert_match'),
            u'prefixMatch': entry.get('prefix_match'),
            u'presentMatch': entry.get('present_match'),
            u'rangeMatch': UrlMapRangematch(entry.get('range_match', {}), self.module).to_request(),
            u'regexMatch': entry.get('regex_match'),
            u'suffixMatch': entry.get('suffix_match'),
        })

    def _response_from_item(self, entry):
        return remove_nones_from_dict({
            u'exactMatch': entry.get(u'exactMatch'),
            u'headerName': entry.get(u'headerName'),
            u'invertMatch': entry.get(u'invertMatch'),
            u'prefixMatch': entry.get(u'prefixMatch'),
            u'presentMatch': entry.get(u'presentMatch'),
            u'rangeMatch': UrlMapRangematch(entry.get(u'rangeMatch', {}), self.module).from_response(),
            u'regexMatch': entry.get(u'regexMatch'),
            u'suffixMatch': entry.get(u'suffixMatch'),
        })


class UrlMapRangematch(object):
    """Maps a rangeMatch block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({u'rangeEnd': data.get('range_end'), u'rangeStart': data.get('range_start')})

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({u'rangeEnd': data.get(u'rangeEnd'), u'rangeStart': data.get(u'rangeStart')})


class UrlMapMetadatafiltersArray(object):
    """Maps metadataFilters entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({
            u'filterLabels': UrlMapFilterlabelsArray(entry.get('filter_labels', []), self.module).to_request(),
            u'filterMatchCriteria': entry.get('filter_match_criteria'),
        })

    def _response_from_item(self, entry):
        return remove_nones_from_dict({
            u'filterLabels': UrlMapFilterlabelsArray(entry.get(u'filterLabels', []), self.module).from_response(),
            u'filterMatchCriteria': entry.get(u'filterMatchCriteria'),
        })


class UrlMapFilterlabelsArray(object):
    """Maps filterLabels entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({u'name': entry.get('name'), u'value': entry.get('value')})

    def _response_from_item(self, entry):
        return remove_nones_from_dict({u'name': entry.get(u'name'), u'value': entry.get(u'value')})


class UrlMapQueryparametermatchesArray(object):
    """Maps queryParameterMatches entries between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, entry):
        return remove_nones_from_dict({
            u'exactMatch': entry.get('exact_match'),
            u'name': entry.get('name'),
            u'presentMatch': entry.get('present_match'),
            u'regexMatch': entry.get('regex_match'),
        })

    def _response_from_item(self, entry):
        return remove_nones_from_dict({
            u'exactMatch': entry.get(u'exactMatch'),
            u'name': entry.get(u'name'),
            u'presentMatch': entry.get(u'presentMatch'),
            u'regexMatch': entry.get(u'regexMatch'),
        })
# NOTE: the following classes are duplicates emitted by the upstream code
# generator; the later identical definitions harmlessly shadow the earlier ones.
class UrlMapRouteaction(object):
    """Maps a routeAction block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'corsPolicy': UrlMapCorspolicy(data.get('cors_policy', {}), self.module).to_request(),
            u'faultInjectionPolicy': UrlMapFaultinjectionpolicy(data.get('fault_injection_policy', {}), self.module).to_request(),
            u'requestMirrorPolicy': UrlMapRequestmirrorpolicy(data.get('request_mirror_policy', {}), self.module).to_request(),
            u'retryPolicy': UrlMapRetrypolicy(data.get('retry_policy', {}), self.module).to_request(),
            u'timeout': UrlMapTimeout(data.get('timeout', {}), self.module).to_request(),
            u'urlRewrite': UrlMapUrlrewrite(data.get('url_rewrite', {}), self.module).to_request(),
            u'weightedBackendServices': UrlMapWeightedbackendservicesArray(data.get('weighted_backend_services', []), self.module).to_request(),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'corsPolicy': UrlMapCorspolicy(data.get(u'corsPolicy', {}), self.module).from_response(),
            u'faultInjectionPolicy': UrlMapFaultinjectionpolicy(data.get(u'faultInjectionPolicy', {}), self.module).from_response(),
            u'requestMirrorPolicy': UrlMapRequestmirrorpolicy(data.get(u'requestMirrorPolicy', {}), self.module).from_response(),
            u'retryPolicy': UrlMapRetrypolicy(data.get(u'retryPolicy', {}), self.module).from_response(),
            u'timeout': UrlMapTimeout(data.get(u'timeout', {}), self.module).from_response(),
            u'urlRewrite': UrlMapUrlrewrite(data.get(u'urlRewrite', {}), self.module).from_response(),
            u'weightedBackendServices': UrlMapWeightedbackendservicesArray(data.get(u'weightedBackendServices', []), self.module).from_response(),
        })


class UrlMapCorspolicy(object):
    """Maps a corsPolicy block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'allowCredentials': data.get('allow_credentials'),
            u'allowHeaders': data.get('allow_headers'),
            u'allowMethods': data.get('allow_methods'),
            u'allowOriginRegexes': data.get('allow_origin_regexes'),
            u'allowOrigins': data.get('allow_origins'),
            u'disabled': data.get('disabled'),
            u'exposeHeaders': data.get('expose_headers'),
            u'maxAge': data.get('max_age'),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'allowCredentials': data.get(u'allowCredentials'),
            u'allowHeaders': data.get(u'allowHeaders'),
            u'allowMethods': data.get(u'allowMethods'),
            u'allowOriginRegexes': data.get(u'allowOriginRegexes'),
            u'allowOrigins': data.get(u'allowOrigins'),
            u'disabled': data.get(u'disabled'),
            u'exposeHeaders': data.get(u'exposeHeaders'),
            u'maxAge': data.get(u'maxAge'),
        })


class UrlMapFaultinjectionpolicy(object):
    """Maps a faultInjectionPolicy block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({
            u'abort': UrlMapAbort(data.get('abort', {}), self.module).to_request(),
            u'delay': UrlMapDelay(data.get('delay', {}), self.module).to_request(),
        })

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({
            u'abort': UrlMapAbort(data.get(u'abort', {}), self.module).from_response(),
            u'delay': UrlMapDelay(data.get(u'delay', {}), self.module).from_response(),
        })


class UrlMapAbort(object):
    """Maps an abort fault-injection block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict({u'httpStatus': data.get('http_status'), u'percentage': data.get('percentage')})

    def from_response(self):
        data = self.request
        return remove_nones_from_dict({u'httpStatus': data.get(u'httpStatus'), u'percentage': data.get(u'percentage')})


class UrlMapDelay(object):
    """Maps a delay fault-injection block between module params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        data = self.request
        return remove_nones_from_dict(
            {u'fixedDelay': UrlMapFixeddelay(data.get('fixed_delay', {}), self.module).to_request(), u'percentage': data.get('percentage')}
        )

    def from_response(self):
        data = self.request
        return remove_nones_from_dict(
            {u'fixedDelay': UrlMapFixeddelay(data.get(u'fixedDelay', {}), self.module).from_response(), u'percentage': data.get(u'percentage')}
        )


# The remainder of this duplicate UrlMapFixeddelay definition continues on the
# next (untouched) line of the file; its opening is kept verbatim so the join
# with that continuation is unchanged.
class UrlMapFixeddelay(object):
    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return
remove_nones_from_dict({u'nanos': self.request.get('nanos'), u'seconds': self.request.get('seconds')}) + + def from_response(self): + return remove_nones_from_dict({u'nanos': self.request.get(u'nanos'), u'seconds': self.request.get(u'seconds')}) + + +class UrlMapRequestmirrorpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'backendService': replace_resource_dict(self.request.get(u'backend_service', {}), 'selfLink')}) + + def from_response(self): + return remove_nones_from_dict({u'backendService': self.request.get(u'backendService')}) + + +class UrlMapRetrypolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'numRetries': self.request.get('num_retries'), + u'perTryTimeout': UrlMapPertrytimeout(self.request.get('per_try_timeout', {}), self.module).to_request(), + u'retryConditions': self.request.get('retry_conditions'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'numRetries': self.request.get(u'numRetries'), + u'perTryTimeout': UrlMapPertrytimeout(self.request.get(u'perTryTimeout', {}), self.module).from_response(), + u'retryConditions': self.request.get(u'retryConditions'), + } + ) + + +class UrlMapPertrytimeout(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'nanos': self.request.get('nanos'), u'seconds': self.request.get('seconds')}) + + def from_response(self): + return remove_nones_from_dict({u'nanos': self.request.get(u'nanos'), u'seconds': self.request.get(u'seconds')}) + + +class UrlMapTimeout(object): + def __init__(self, request, module): + self.module = module + if 
request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'nanos': self.request.get('nanos'), u'seconds': self.request.get('seconds')}) + + def from_response(self): + return remove_nones_from_dict({u'nanos': self.request.get(u'nanos'), u'seconds': self.request.get(u'seconds')}) + + +class UrlMapUrlrewrite(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'hostRewrite': self.request.get('host_rewrite'), u'pathPrefixRewrite': self.request.get('path_prefix_rewrite')}) + + def from_response(self): + return remove_nones_from_dict({u'hostRewrite': self.request.get(u'hostRewrite'), u'pathPrefixRewrite': self.request.get(u'pathPrefixRewrite')}) + + +class UrlMapWeightedbackendservicesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'backendService': replace_resource_dict(item.get(u'backend_service', {}), 'selfLink'), + u'headerAction': UrlMapHeaderaction(item.get('header_action', {}), self.module).to_request(), + u'weight': item.get('weight'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'backendService': item.get(u'backendService'), + u'headerAction': UrlMapHeaderaction(item.get(u'headerAction', {}), self.module).from_response(), + u'weight': item.get(u'weight'), + } + ) + + +class UrlMapHeaderaction(object): + def __init__(self, request, module): + self.module = module + if request: + 
self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(self.request.get('request_headers_to_add', []), self.module).to_request(), + u'requestHeadersToRemove': self.request.get('request_headers_to_remove'), + u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(self.request.get('response_headers_to_add', []), self.module).to_request(), + u'responseHeadersToRemove': self.request.get('response_headers_to_remove'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(self.request.get(u'requestHeadersToAdd', []), self.module).from_response(), + u'requestHeadersToRemove': self.request.get(u'requestHeadersToRemove'), + u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(self.request.get(u'responseHeadersToAdd', []), self.module).from_response(), + u'responseHeadersToRemove': self.request.get(u'responseHeadersToRemove'), + } + ) + + +class UrlMapRequestheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class UrlMapResponseheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = 
request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class UrlMapUrlredirect(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get('host_redirect'), + u'httpsRedirect': self.request.get('https_redirect'), + u'pathRedirect': self.request.get('path_redirect'), + u'prefixRedirect': self.request.get('prefix_redirect'), + u'redirectResponseCode': self.request.get('redirect_response_code'), + u'stripQuery': self.request.get('strip_query'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get(u'hostRedirect'), + u'httpsRedirect': self.request.get(u'httpsRedirect'), + u'pathRedirect': self.request.get(u'pathRedirect'), + u'prefixRedirect': self.request.get(u'prefixRedirect'), + u'redirectResponseCode': self.request.get(u'redirectResponseCode'), + u'stripQuery': self.request.get(u'stripQuery'), + } + ) + + +class UrlMapDefaulturlredirect(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get('host_redirect'), + u'httpsRedirect': 
self.request.get('https_redirect'), + u'pathRedirect': self.request.get('path_redirect'), + u'prefixRedirect': self.request.get('prefix_redirect'), + u'redirectResponseCode': self.request.get('redirect_response_code'), + u'stripQuery': self.request.get('strip_query'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get(u'hostRedirect'), + u'httpsRedirect': self.request.get(u'httpsRedirect'), + u'pathRedirect': self.request.get(u'pathRedirect'), + u'prefixRedirect': self.request.get(u'prefixRedirect'), + u'redirectResponseCode': self.request.get(u'redirectResponseCode'), + u'stripQuery': self.request.get(u'stripQuery'), + } + ) + + +class UrlMapDefaultrouteaction(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'weightedBackendServices': UrlMapWeightedbackendservicesArray(self.request.get('weighted_backend_services', []), self.module).to_request(), + u'urlRewrite': UrlMapUrlrewrite(self.request.get('url_rewrite', {}), self.module).to_request(), + u'timeout': UrlMapTimeout(self.request.get('timeout', {}), self.module).to_request(), + u'retryPolicy': UrlMapRetrypolicy(self.request.get('retry_policy', {}), self.module).to_request(), + u'requestMirrorPolicy': UrlMapRequestmirrorpolicy(self.request.get('request_mirror_policy', {}), self.module).to_request(), + u'corsPolicy': UrlMapCorspolicy(self.request.get('cors_policy', {}), self.module).to_request(), + u'faultInjectionPolicy': UrlMapFaultinjectionpolicy(self.request.get('fault_injection_policy', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'weightedBackendServices': UrlMapWeightedbackendservicesArray(self.request.get(u'weightedBackendServices', []), self.module).from_response(), + u'urlRewrite': UrlMapUrlrewrite(self.request.get(u'urlRewrite', 
{}), self.module).from_response(), + u'timeout': UrlMapTimeout(self.request.get(u'timeout', {}), self.module).from_response(), + u'retryPolicy': UrlMapRetrypolicy(self.request.get(u'retryPolicy', {}), self.module).from_response(), + u'requestMirrorPolicy': UrlMapRequestmirrorpolicy(self.request.get(u'requestMirrorPolicy', {}), self.module).from_response(), + u'corsPolicy': UrlMapCorspolicy(self.request.get(u'corsPolicy', {}), self.module).from_response(), + u'faultInjectionPolicy': UrlMapFaultinjectionpolicy(self.request.get(u'faultInjectionPolicy', {}), self.module).from_response(), + } + ) + + +class UrlMapWeightedbackendservicesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'backendService': replace_resource_dict(item.get(u'backend_service', {}), 'selfLink'), + u'weight': item.get('weight'), + u'headerAction': UrlMapHeaderaction(item.get('header_action', {}), self.module).to_request(), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'backendService': item.get(u'backendService'), + u'weight': item.get(u'weight'), + u'headerAction': UrlMapHeaderaction(item.get(u'headerAction', {}), self.module).from_response(), + } + ) + + +class UrlMapHeaderaction(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'requestHeadersToRemove': self.request.get('request_headers_to_remove'), + u'requestHeadersToAdd': 
UrlMapRequestheaderstoaddArray(self.request.get('request_headers_to_add', []), self.module).to_request(), + u'responseHeadersToRemove': self.request.get('response_headers_to_remove'), + u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(self.request.get('response_headers_to_add', []), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'requestHeadersToRemove': self.request.get(u'requestHeadersToRemove'), + u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(self.request.get(u'requestHeadersToAdd', []), self.module).from_response(), + u'responseHeadersToRemove': self.request.get(u'responseHeadersToRemove'), + u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(self.request.get(u'responseHeadersToAdd', []), self.module).from_response(), + } + ) + + +class UrlMapRequestheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class UrlMapResponseheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item 
in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class UrlMapUrlrewrite(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'pathPrefixRewrite': self.request.get('path_prefix_rewrite'), u'hostRewrite': self.request.get('host_rewrite')}) + + def from_response(self): + return remove_nones_from_dict({u'pathPrefixRewrite': self.request.get(u'pathPrefixRewrite'), u'hostRewrite': self.request.get(u'hostRewrite')}) + + +class UrlMapTimeout(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'seconds': self.request.get('seconds'), u'nanos': self.request.get('nanos')}) + + def from_response(self): + return remove_nones_from_dict({u'seconds': self.request.get(u'seconds'), u'nanos': self.request.get(u'nanos')}) + + +class UrlMapRetrypolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'retryConditions': self.request.get('retry_conditions'), + u'numRetries': self.request.get('num_retries'), + u'perTryTimeout': UrlMapPertrytimeout(self.request.get('per_try_timeout', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'retryConditions': self.request.get(u'retryConditions'), + 
u'numRetries': self.request.get(u'numRetries'), + u'perTryTimeout': UrlMapPertrytimeout(self.request.get(u'perTryTimeout', {}), self.module).from_response(), + } + ) + + +class UrlMapPertrytimeout(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'seconds': self.request.get('seconds'), u'nanos': self.request.get('nanos')}) + + def from_response(self): + return remove_nones_from_dict({u'seconds': self.request.get(u'seconds'), u'nanos': self.request.get(u'nanos')}) + + +class UrlMapRequestmirrorpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'backendService': replace_resource_dict(self.request.get(u'backend_service', {}), 'selfLink')}) + + def from_response(self): + return remove_nones_from_dict({u'backendService': self.request.get(u'backendService')}) + + +class UrlMapCorspolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'allowOrigins': self.request.get('allow_origins'), + u'allowOriginRegexes': self.request.get('allow_origin_regexes'), + u'allowMethods': self.request.get('allow_methods'), + u'allowHeaders': self.request.get('allow_headers'), + u'exposeHeaders': self.request.get('expose_headers'), + u'maxAge': self.request.get('max_age'), + u'allowCredentials': self.request.get('allow_credentials'), + u'disabled': self.request.get('disabled'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'allowOrigins': self.request.get(u'allowOrigins'), + u'allowOriginRegexes': self.request.get(u'allowOriginRegexes'), + u'allowMethods': self.request.get(u'allowMethods'), + u'allowHeaders': 
self.request.get(u'allowHeaders'), + u'exposeHeaders': self.request.get(u'exposeHeaders'), + u'maxAge': self.request.get(u'maxAge'), + u'allowCredentials': self.request.get(u'allowCredentials'), + u'disabled': self.request.get(u'disabled'), + } + ) + + +class UrlMapFaultinjectionpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'delay': UrlMapDelay(self.request.get('delay', {}), self.module).to_request(), + u'abort': UrlMapAbort(self.request.get('abort', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'delay': UrlMapDelay(self.request.get(u'delay', {}), self.module).from_response(), + u'abort': UrlMapAbort(self.request.get(u'abort', {}), self.module).from_response(), + } + ) + + +class UrlMapDelay(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'fixedDelay': UrlMapFixeddelay(self.request.get('fixed_delay', {}), self.module).to_request(), u'percentage': self.request.get('percentage')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'fixedDelay': UrlMapFixeddelay(self.request.get(u'fixedDelay', {}), self.module).from_response(), u'percentage': self.request.get(u'percentage')} + ) + + +class UrlMapFixeddelay(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'seconds': self.request.get('seconds'), u'nanos': self.request.get('nanos')}) + + def from_response(self): + return remove_nones_from_dict({u'seconds': self.request.get(u'seconds'), u'nanos': self.request.get(u'nanos')}) + + +class UrlMapAbort(object): + def __init__(self, 
request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'httpStatus': self.request.get('http_status'), u'percentage': self.request.get('percentage')}) + + def from_response(self): + return remove_nones_from_dict({u'httpStatus': self.request.get(u'httpStatus'), u'percentage': self.request.get(u'percentage')}) + + +class UrlMapTestsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'description': item.get('description'), + u'host': item.get('host'), + u'path': item.get('path'), + u'service': replace_resource_dict(item.get(u'service', {}), 'selfLink'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + {u'description': item.get(u'description'), u'host': item.get(u'host'), u'path': item.get(u'path'), u'service': item.get(u'service')} + ) + + +class UrlMapDefaulturlredirect(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'hostRedirect': self.request.get('host_redirect'), + u'httpsRedirect': self.request.get('https_redirect'), + u'pathRedirect': self.request.get('path_redirect'), + u'prefixRedirect': self.request.get('prefix_redirect'), + u'redirectResponseCode': self.request.get('redirect_response_code'), + u'stripQuery': self.request.get('strip_query'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'hostRedirect': 
self.request.get(u'hostRedirect'), + u'httpsRedirect': self.request.get(u'httpsRedirect'), + u'pathRedirect': self.request.get(u'pathRedirect'), + u'prefixRedirect': self.request.get(u'prefixRedirect'), + u'redirectResponseCode': self.request.get(u'redirectResponseCode'), + u'stripQuery': self.request.get(u'stripQuery'), + } + ) + + +class UrlMapDefaultrouteaction(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'weightedBackendServices': UrlMapWeightedbackendservicesArray(self.request.get('weighted_backend_services', []), self.module).to_request(), + u'urlRewrite': UrlMapUrlrewrite(self.request.get('url_rewrite', {}), self.module).to_request(), + u'timeout': UrlMapTimeout(self.request.get('timeout', {}), self.module).to_request(), + u'retryPolicy': UrlMapRetrypolicy(self.request.get('retry_policy', {}), self.module).to_request(), + u'requestMirrorPolicy': UrlMapRequestmirrorpolicy(self.request.get('request_mirror_policy', {}), self.module).to_request(), + u'corsPolicy': UrlMapCorspolicy(self.request.get('cors_policy', {}), self.module).to_request(), + u'faultInjectionPolicy': UrlMapFaultinjectionpolicy(self.request.get('fault_injection_policy', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'weightedBackendServices': UrlMapWeightedbackendservicesArray(self.request.get(u'weightedBackendServices', []), self.module).from_response(), + u'urlRewrite': UrlMapUrlrewrite(self.request.get(u'urlRewrite', {}), self.module).from_response(), + u'timeout': UrlMapTimeout(self.request.get(u'timeout', {}), self.module).from_response(), + u'retryPolicy': UrlMapRetrypolicy(self.request.get(u'retryPolicy', {}), self.module).from_response(), + u'requestMirrorPolicy': UrlMapRequestmirrorpolicy(self.request.get(u'requestMirrorPolicy', {}), self.module).from_response(), + 
u'corsPolicy': UrlMapCorspolicy(self.request.get(u'corsPolicy', {}), self.module).from_response(), + u'faultInjectionPolicy': UrlMapFaultinjectionpolicy(self.request.get(u'faultInjectionPolicy', {}), self.module).from_response(), + } + ) + + +class UrlMapWeightedbackendservicesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'backendService': replace_resource_dict(item.get(u'backend_service', {}), 'selfLink'), + u'weight': item.get('weight'), + u'headerAction': UrlMapHeaderaction(item.get('header_action', {}), self.module).to_request(), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'backendService': item.get(u'backendService'), + u'weight': item.get(u'weight'), + u'headerAction': UrlMapHeaderaction(item.get(u'headerAction', {}), self.module).from_response(), + } + ) + + +class UrlMapHeaderaction(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'requestHeadersToRemove': self.request.get('request_headers_to_remove'), + u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(self.request.get('request_headers_to_add', []), self.module).to_request(), + u'responseHeadersToRemove': self.request.get('response_headers_to_remove'), + u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(self.request.get('response_headers_to_add', []), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + 
u'requestHeadersToRemove': self.request.get(u'requestHeadersToRemove'), + u'requestHeadersToAdd': UrlMapRequestheaderstoaddArray(self.request.get(u'requestHeadersToAdd', []), self.module).from_response(), + u'responseHeadersToRemove': self.request.get(u'responseHeadersToRemove'), + u'responseHeadersToAdd': UrlMapResponseheaderstoaddArray(self.request.get(u'responseHeadersToAdd', []), self.module).from_response(), + } + ) + + +class UrlMapRequestheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class UrlMapResponseheaderstoaddArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'headerName': item.get('header_name'), u'headerValue': item.get('header_value'), u'replace': item.get('replace')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'headerName': item.get(u'headerName'), 
u'headerValue': item.get(u'headerValue'), u'replace': item.get(u'replace')}) + + +class UrlMapUrlrewrite(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'pathPrefixRewrite': self.request.get('path_prefix_rewrite'), u'hostRewrite': self.request.get('host_rewrite')}) + + def from_response(self): + return remove_nones_from_dict({u'pathPrefixRewrite': self.request.get(u'pathPrefixRewrite'), u'hostRewrite': self.request.get(u'hostRewrite')}) + + +class UrlMapTimeout(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'seconds': self.request.get('seconds'), u'nanos': self.request.get('nanos')}) + + def from_response(self): + return remove_nones_from_dict({u'seconds': self.request.get(u'seconds'), u'nanos': self.request.get(u'nanos')}) + + +class UrlMapRetrypolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'retryConditions': self.request.get('retry_conditions'), + u'numRetries': self.request.get('num_retries'), + u'perTryTimeout': UrlMapPertrytimeout(self.request.get('per_try_timeout', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'retryConditions': self.request.get(u'retryConditions'), + u'numRetries': self.request.get(u'numRetries'), + u'perTryTimeout': UrlMapPertrytimeout(self.request.get(u'perTryTimeout', {}), self.module).from_response(), + } + ) + + +class UrlMapPertrytimeout(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return 
remove_nones_from_dict({u'seconds': self.request.get('seconds'), u'nanos': self.request.get('nanos')}) + + def from_response(self): + return remove_nones_from_dict({u'seconds': self.request.get(u'seconds'), u'nanos': self.request.get(u'nanos')}) + + +class UrlMapRequestmirrorpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'backendService': replace_resource_dict(self.request.get(u'backend_service', {}), 'selfLink')}) + + def from_response(self): + return remove_nones_from_dict({u'backendService': self.request.get(u'backendService')}) + + +class UrlMapCorspolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'allowOrigins': self.request.get('allow_origins'), + u'allowOriginRegexes': self.request.get('allow_origin_regexes'), + u'allowMethods': self.request.get('allow_methods'), + u'allowHeaders': self.request.get('allow_headers'), + u'exposeHeaders': self.request.get('expose_headers'), + u'maxAge': self.request.get('max_age'), + u'allowCredentials': self.request.get('allow_credentials'), + u'disabled': self.request.get('disabled'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'allowOrigins': self.request.get(u'allowOrigins'), + u'allowOriginRegexes': self.request.get(u'allowOriginRegexes'), + u'allowMethods': self.request.get(u'allowMethods'), + u'allowHeaders': self.request.get(u'allowHeaders'), + u'exposeHeaders': self.request.get(u'exposeHeaders'), + u'maxAge': self.request.get(u'maxAge'), + u'allowCredentials': self.request.get(u'allowCredentials'), + u'disabled': self.request.get(u'disabled'), + } + ) + + +class UrlMapFaultinjectionpolicy(object): + def __init__(self, request, module): + self.module = module + if request: + 
self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'delay': UrlMapDelay(self.request.get('delay', {}), self.module).to_request(), + u'abort': UrlMapAbort(self.request.get('abort', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'delay': UrlMapDelay(self.request.get(u'delay', {}), self.module).from_response(), + u'abort': UrlMapAbort(self.request.get(u'abort', {}), self.module).from_response(), + } + ) + + +class UrlMapDelay(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'fixedDelay': UrlMapFixeddelay(self.request.get('fixed_delay', {}), self.module).to_request(), u'percentage': self.request.get('percentage')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'fixedDelay': UrlMapFixeddelay(self.request.get(u'fixedDelay', {}), self.module).from_response(), u'percentage': self.request.get(u'percentage')} + ) + + +class UrlMapFixeddelay(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'seconds': self.request.get('seconds'), u'nanos': self.request.get('nanos')}) + + def from_response(self): + return remove_nones_from_dict({u'seconds': self.request.get(u'seconds'), u'nanos': self.request.get(u'nanos')}) + + +class UrlMapAbort(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'httpStatus': self.request.get('http_status'), u'percentage': self.request.get('percentage')}) + + def from_response(self): + return remove_nones_from_dict({u'httpStatus': self.request.get(u'httpStatus'), 
u'percentage': self.request.get(u'percentage')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_url_map_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_url_map_info.py new file mode 100644 index 000000000..0bbc26207 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_url_map_info.py @@ -0,0 +1,2541 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_url_map_info +description: +- Gather info for GCP UrlMap +short_description: Gather info for GCP UrlMap +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). 
+ - Each additional filter in the list will be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on an URL map + gcp_compute_url_map_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + defaultService: + description: + - The full or partial URL of the defaultService resource to which traffic is + directed if none of the hostRules match. If defaultRouteAction is additionally + specified, advanced routing actions like URL Rewrites, etc. take effect prior + to sending the request to the backend. However, if defaultService is specified, + defaultRouteAction cannot contain any weightedBackendServices. Conversely, + if routeAction specifies any weightedBackendServices, service must not be + specified. Only one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService + must be set. + returned: success + type: dict + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. + This field is used in optimistic locking. + returned: success + type: str + headerAction: + description: + - Specifies changes to request and response headers that need to take effect + for the selected backendService. The headerAction specified here take effect + after headerAction specified under pathMatcher. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request to + the backendService. 
+ returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back to the + client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + returned: success + type: list + hostRules: + description: + - The list of HostRules to use against the URL. + returned: success + type: complex + contains: + description: + description: + - An optional description of this resource. Provide this property when you + create the resource. + returned: success + type: str + hosts: + description: + - The list of host patterns to match. They must be valid hostnames, except + * will match any string of ([a-z0-9-.]*). 
In that case, * must be the + first character and must be followed in the pattern by either - or . + returned: success + type: list + pathMatcher: + description: + - The name of the PathMatcher to use to match the path portion of the URL + if the hostRule matches the URL's host portion. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + pathMatchers: + description: + - The list of named PathMatchers to use against the URL. + returned: success + type: complex + contains: + defaultService: + description: + - 'The full or partial URL to the BackendService resource. This will be + used if none of the pathRules or routeRules defined by this PathMatcher + are matched. For example, the following are all valid URLs to a BackendService + resource: - U(https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService) + - compute/v1/projects/project/global/backendServices/backendService - + global/backendServices/backendService If defaultRouteAction is additionally + specified, advanced routing actions like URL Rewrites, etc. take effect + prior to sending the request to the backend. However, if defaultService + is specified, defaultRouteAction cannot contain any weightedBackendServices. + Conversely, if defaultRouteAction specifies any weightedBackendServices, + defaultService must not be specified.' + - 'Only one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService + must be set. 
Authorization requires one or more of the following Google + IAM permissions on the specified resource defaultService: - compute.backendBuckets.use + - compute.backendServices.use .' + returned: success + type: dict + description: + description: + - An optional description of this resource. Provide this property when you + create the resource. + returned: success + type: str + headerAction: + description: + - Specifies changes to request and response headers that need to take effect + for the selected backendService. HeaderAction specified here are applied + after the matching HttpRouteRule HeaderAction and before the HeaderAction + in the UrlMap . + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back to + the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. 
+ returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + response prior to sending the response back to the client. + returned: success + type: list + name: + description: + - The name to which this PathMatcher is referred by the HostRule. + returned: success + type: str + pathRules: + description: + - 'The list of path rules. Use this list instead of routeRules when routing + based on simple path matching is all that''s required. The order by which + path rules are specified does not matter. Matches are always done on the + longest-path-first basis. For example: a pathRule with a path /a/b/c/* + will match before /a/b/* irrespective of the order in which those paths + appear in this list. Within a given pathMatcher, only one of pathRules + or routeRules must be set.' + returned: success + type: complex + contains: + service: + description: + - The backend service resource to which traffic is directed if this + rule is matched. If routeAction is additionally specified, advanced + routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction + cannot contain any weightedBackendService s. Conversely, if routeAction + specifies any weightedBackendServices, service must not be specified. + Only one of urlRedirect, service or routeAction.weightedBackendService + must be set. + returned: success + type: dict + paths: + description: + - 'The list of path patterns to match. Each must start with / and the + only place a \\* is allowed is at the end following a /. The string + fed to the path matcher does not include any text after the first + ? 
or #, and those chars are not allowed here.' + returned: success + type: list + routeAction: + description: + - In response to a matching path, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction + specifies any weightedBackendServices, service must not be set. Conversely + if service is set, routeAction cannot contain any weightedBackendServices. + Only one of routeAction or urlRedirect must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing + . + returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This + translates to the Access- Control-Allow-Credentials header. + Defaults to false. + returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers + header. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods + header. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed + origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or + allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. 
+ returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can + be cached. This translates to the content for the Access-Control-Max-Age + header. + returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic + to test the resiliency of clients to backend service failure. + As part of fault injection, when clients send requests to a backend + service, delays can be introduced by Loadbalancer on a percentage + of requests before sending those request to the backend service. + Similarly requests from clients can be aborted by the Loadbalancer + for a percentage of requests. timeout and retry_policy will be + ignored by clients that are configured with a fault_injection_policy. + returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The + value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. 
+ Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be + from 0 to 315,576,000,000 inclusive. + returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's + backends are shadowed to a separate mirrored backend service. + Loadbalancer does not wait for responses from the shadow service. + Prior to sending traffic to the shadow service, the host / authority + header is suffixed with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The BackendService resource being mirrored to. + returned: success + type: dict + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + numRetries: + description: + - Specifies the allowed number of retries. This number must be + > 0. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. 
+ returned: success + type: str + retryConditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: * 5xx: Loadbalancer will attempt a retry + if the backend service responds with any 5xx response code, + or if the backend service does not respond at all, example: + disconnects, reset, read timeout, connection failure, and + refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx + response codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend + service resets the stream with a REFUSED_STREAM error code. + This reset type indicates that it is safe to retry." + - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header + is set to resource-exhausted * unavailable: Loadbalancer will + retry if the gRPC status code in the response header is set + to unavailable ." + returned: success + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. 
+ Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + returned: success + type: str + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding + the request to the matched service . + returned: success + type: complex + contains: + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the + request's host header is replaced with contents of hostRewrite. + The value must be between 1 and 255 characters. + returned: success + type: str + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by + pathPrefixRewrite. The value must be between 1 and 1024 characters. + returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a + route match occurs. The weights determine the fraction of traffic + that flows to their corresponding backend service. If all traffic + needs to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default BackendService resource. Before forwarding the + request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. 
+ returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the + backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to + the client. 
+ returned: success + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, + computed as weight / (sum of all weightedBackendService weights + in routeAction) . The selection of a backend service is determined + only for new traffic. Once a user's request has been directed + to a backendService, subsequent requests will be sent to the + same backendService as determined by the BackendService's + session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + urlRedirect: + description: + - When a path pattern is matched, the request is redirected to a URL + specified by urlRedirect. If urlRedirect is specified, service or + routeAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of + the one that was supplied in the request. The value must be between + 1 and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. + - If set to false, the URL scheme of the redirected request will + remain the same as that of the request. This must only be set + for UrlMaps used in TargetHttpProxys. Setting this true for TargetHttpsProxy + is not permitted. The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of + the one that was supplied in the request. pathRedirect cannot + be supplied together with prefixRedirect. Supply one alone or + neither. If neither is supplied, the path of the original request + will be used for the redirect. + - The value must be between 1 and 1024 characters. 
+ returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting + the request. prefixRedirect cannot be supplied together with pathRedirect. + Supply one alone or neither. If neither is supplied, the path + of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported + values are: * MOVED_PERMANENTLY_DEFAULT, which is the default + value and corresponds to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, + the request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original + URL is removed prior to redirecting the request. If set to false, + the query portion of the original URL is retained. + returned: success + type: bool + routeRules: + description: + - 'The list of ordered HTTP route rules. Use this list instead of pathRules + when advanced route matching and routing actions are desired. The order + of specifying routeRules matters: the first rule that matches will cause + its specified routing action to take effect. Within a given pathMatcher, + only one of pathRules or routeRules must be set. routeRules are not supported + in UrlMaps intended for External load balancers.' + returned: success + type: complex + contains: + priority: + description: + - For routeRules within a given pathMatcher, priority determines the + order in which load balancer will interpret routeRules. 
RouteRules + are evaluated in order of priority, from the lowest to highest number. + The priority of a rule decreases as its number increases (1, 2, 3, + N+1). The first rule that matches the request is applied. + - You cannot configure two or more routeRules with the same priority. + - Priority for each rule must be set to a number between 0 and 2147483647 + inclusive. + - Priority numbers can have gaps, which enable you to add or remove + rules in the future without affecting the rest of the rules. For example, + 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to + which you could add rules numbered from 6 to 8, 10 to 11, and 13 to + 15 in the future without any impact on existing rules. + returned: success + type: int + service: + description: + - The backend service resource to which traffic is directed if this + rule is matched. If routeAction is additionally specified, advanced + routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction + cannot contain any weightedBackendService s. Conversely, if routeAction + specifies any weightedBackendServices, service must not be specified. + Only one of urlRedirect, service or routeAction.weightedBackendService + must be set. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need to take + effect for the selected backendService. The headerAction specified + here are applied before the matching pathMatchers[].headerAction and + after pathMatchers[].routeRules[].r outeAction.weightedBackendService.backendServiceWeightAction[].headerAction + . + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. 
+ returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the + header, discarding any values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back + to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the + header, discarding any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the response prior to sending the response back to the client. + returned: success + type: list + matchRules: + description: + - The rules for determining a match. + returned: success + type: complex + contains: + fullPathMatch: + description: + - For satisfying the matchRule condition, the path of the request + must exactly match the value specified in fullPathMatch after + removing any query parameters and anchor that may be part of the + original URL. FullPathMatch must be between 1 and 1024 characters. + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. 
+ returned: success + type: str + headerMatches: + description: + - Specifies a list of header match criteria, all of which must match + corresponding headers in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The value should exactly match contents of exactMatch. Only + one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch + or rangeMatch must be set. + returned: success + type: str + headerName: + description: + - The name of the HTTP header to match. For matching against + the HTTP request's authority, use a headerMatch with the header + name ":authority". For matching a request's method, use the + headerName ":method". + returned: success + type: str + invertMatch: + description: + - If set to false, the headerMatch is considered a match if + the match criteria above are met. If set to true, the headerMatch + is considered a match if the match criteria above are NOT + met. Defaults to false. + returned: success + type: bool + prefixMatch: + description: + - The value of the header must start with the contents of prefixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + presentMatch: + description: + - A header with the contents of headerName must exist. The match + takes place whether or not the request's header has a value + or not. Only one of exactMatch, prefixMatch, suffixMatch, + regexMatch, presentMatch or rangeMatch must be set. + returned: success + type: bool + rangeMatch: + description: + - The header value must be an integer and its value must be + in the range specified in rangeMatch. If the header does not + contain an integer, number or is empty, the match fails. For + example for a range [-5, 0] - -3 will match. - 0 will not + match. - 0.25 will not match. - -3someString will not match. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. 
+ returned: success + type: complex + contains: + rangeEnd: + description: + - The end of the range (exclusive). + returned: success + type: int + rangeStart: + description: + - The start of the range (inclusive). + returned: success + type: int + regexMatch: + description: + - 'The value of the header must match the regular expression + specified in regexMatch. For regular expression grammar, please + see: en.cppreference.com/w/cpp/regex/ecmascript For matching + against a port specified in the HTTP request, use a headerMatch + with headerName set to PORT and a regular expression that + satisfies the RFC2616 Host header''s port specifier.' + - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + suffixMatch: + description: + - The value of the header must end with the contents of suffixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + ignoreCase: + description: + - Specifies that prefixMatch and fullPathMatch matches are case + sensitive. + - Defaults to false. + returned: success + type: bool + metadataFilters: + description: + - Opaque filter criteria used by Loadbalancer to restrict routing + configuration to a limited set xDS compliant clients. In their + xDS requests to Loadbalancer, xDS clients present node metadata. + If a match takes place, the relevant routing configuration is + made available to those proxies. For each metadataFilter in this + list, if its filterMatchCriteria is set to MATCH_ANY, at least + one of the filterLabels must match the corresponding label provided + in the metadata. If its filterMatchCriteria is set to MATCH_ALL, + then all of its filterLabels must match with corresponding labels + in the provided metadata. metadataFilters specified here can be + overrides those specified in ForwardingRule that refers to this + UrlMap. 
metadataFilters only applies to Loadbalancers that have + their loadBalancingScheme set to INTERNAL_SELF_MANAGED. + returned: success + type: complex + contains: + filterLabels: + description: + - The list of label value pairs that must match labels in the + provided metadata based on filterMatchCriteria This list must + not be empty and can have at the most 64 entries. + returned: success + type: complex + contains: + name: + description: + - Name of metadata label. The name can have a maximum length + of 1024 characters and must be at least 1 character long. + returned: success + type: str + value: + description: + - The value of the label must match the specified value. + value can have a maximum length of 1024 characters. + returned: success + type: str + filterMatchCriteria: + description: + - 'Specifies how individual filterLabel matches within the list + of filterLabels contribute towards the overall metadataFilter + match. Supported values are: - MATCH_ANY: At least one of + the filterLabels must have a matching label in the provided + metadata.' + - "- MATCH_ALL: All filterLabels must have matching labels in + the provided metadata." + returned: success + type: str + prefixMatch: + description: + - For satisfying the matchRule condition, the request's path must + begin with the specified prefixMatch. prefixMatch must begin with + a /. The value must be between 1 and 1024 characters. Only one + of prefixMatch, fullPathMatch or regexMatch must be specified. + returned: success + type: str + queryParameterMatches: + description: + - Specifies a list of query parameter match criteria, all of which + must match corresponding query parameters in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The queryParameterMatch matches if the value of the parameter + exactly matches the contents of exactMatch. Only one of presentMatch, + exactMatch and regexMatch must be set. 
+ returned: success + type: str + name: + description: + - The name of the query parameter to match. The query parameter + must exist in the request, in the absence of which the request + match fails. + returned: success + type: str + presentMatch: + description: + - Specifies that the queryParameterMatch matches if the request + contains the query parameter, irrespective of whether the + parameter has a value or not. Only one of presentMatch, exactMatch + and regexMatch must be set. + returned: success + type: bool + regexMatch: + description: + - The queryParameterMatch matches if the value of the parameter + matches the regular expression specified by regexMatch. For + the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of presentMatch, exactMatch and regexMatch must be + set. + returned: success + type: str + regexMatch: + description: + - For satisfying the matchRule condition, the path of the request + must satisfy the regular expression specified in regexMatch after + removing any query parameters and anchor supplied with the original + URL. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. + returned: success + type: str + routeAction: + description: + - In response to a matching matchRule, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction + specifies any weightedBackendServices, service must not be set. Conversely + if service is set, routeAction cannot contain any weightedBackendServices. + Only one of routeAction or urlRedirect must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing + . 
+ returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This + translates to the Access- Control-Allow-Credentials header. + Defaults to false. + returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers + header. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods + header. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed + origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or + allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + - which indicates that the CORS policy is in effect. Defaults + to false. + returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can + be cached. This translates to the content for the Access-Control-Max-Age + header. + returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic + to test the resiliency of clients to backend service failure. 
+ As part of fault injection, when clients send requests to a backend + service, delays can be introduced by Loadbalancer on a percentage + of requests before sending those request to the backend service. + Similarly requests from clients can be aborted by the Loadbalancer + for a percentage of requests. timeout and retry_policy will be + ignored by clients that are configured with a fault_injection_policy. + returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The + value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be + from 0 to 315,576,000,000 inclusive. + returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. 
+ returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's + backends are shadowed to a separate mirrored backend service. + Loadbalancer does not wait for responses from the shadow service. + Prior to sending traffic to the shadow service, the host / authority + header is suffixed with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The BackendService resource being mirrored to. + returned: success + type: dict + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + numRetries: + description: + - Specifies the allowed number retries. This number must be + > 0. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + - If not specified, will use the timeout set in HttpRouteAction. + If timeout in HttpRouteAction is not set, will use the largest + timeout among all backend services associated with the route. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + returned: success + type: str + retryConditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: * 5xx: Loadbalancer will attempt a retry + if the backend service responds with any 5xx response code, + or if the backend service does not respond at all, example: + disconnects, reset, read timeout, connection failure, and + refused streams.' 
+ - "* gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx + response codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend + service resets the stream with a REFUSED_STREAM error code. + This reset type indicates that it is safe to retry." + - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header + is set to resource-exhausted * unavailable: Loadbalancer will + retry if the gRPC status code in the response header is set + to unavailable ." + returned: success + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request is has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + returned: success + type: str + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding + the request to the matched service . 
+ returned: success + type: complex + contains: + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the + request's host header is replaced with contents of hostRewrite. + The value must be between 1 and 255 characters. + returned: success + type: str + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by + pathPrefixRewrite. The value must be between 1 and 1024 characters. + returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a + route match occurs. The weights determine the fraction of traffic + that flows to their corresponding backend service. If all traffic + needs to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default BackendService resource. Before forwarding the + request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. 
+ returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the + backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to + the client. + returned: success + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, + computed as weight / (sum of all weightedBackendService weights + in routeAction) . The selection of a backend service is determined + only for new traffic. Once a user's request has been directed + to a backendService, subsequent requests will be sent to the + same backendService as determined by the BackendService's + session affinity policy. + - The value must be between 0 and 1000 . 
+ returned: success + type: int + urlRedirect: + description: + - When this rule is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction + must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of + the one that was supplied in the request. The value must be between + 1 and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. If set to false, the URL scheme of the redirected request + will remain the same as that of the request. This must only be + set for UrlMaps used in TargetHttpProxys. + - Setting this true for TargetHttpsProxy is not permitted. Defaults + to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of + the one that was supplied in the request. Only one of pathRedirect + or prefixRedirect must be specified. The value must be between + 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting + the request. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported + values are: * MOVED_PERMANENTLY_DEFAULT, which is the default + value and corresponds to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, + the request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained." 
+ returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original + URL is removed prior to redirecting the request. If set to false, + the query portion of the original URL is retained. Defaults to + false. + returned: success + type: bool + defaultUrlRedirect: + description: + - When none of the specified hostRules match, the request is redirected + to a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, + defaultService or defaultRouteAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the + one that was supplied in the request. The value must be between 1 + and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to + https. If set to false, the URL scheme of the redirected request will + remain the same as that of the request. This must only be set for + UrlMaps used in TargetHttpProxys. Setting this true for TargetHttpsProxy + is not permitted. The default is set to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the + one that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither + is supplied, the path of the original request will be used for the + redirect. The value must be between 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the + request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply + one alone or neither. 
If neither is supplied, the path of the original + request will be used for the redirect. The value must be between 1 + and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + request method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the + request method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL + is removed prior to redirecting the request. If set to false, the + query portion of the original URL is retained. + returned: success + type: bool + defaultRouteAction: + description: + - defaultRouteAction takes effect when none of the pathRules or routeRules + match. The load balancer performs advanced routing actions like URL rewrites, + header transformations, etc. prior to forwarding the request to the selected + backend. If defaultRouteAction specifies any weightedBackendServices, + defaultService must not be set. + - Conversely if defaultService is set, defaultRouteAction cannot contain + any weightedBackendServices. + - Only one of defaultRouteAction or defaultUrlRedirect must be set. + returned: success + type: complex + contains: + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a route + match occurs. + - The weights determine the fraction of traffic that flows to their + corresponding backend service. + - If all traffic needs to go to a single backend service, there must + be one weightedBackendService with weight set to a non 0 number. 
+ - Once a backendService is identified and before forwarding the request + to the backend service, advanced routing actions like Url rewrites + and header transformations are applied depending on additional settings + specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The full or partial URL to the default BackendService resource. + Before forwarding the request to backendService, the loadbalancer + applies any relevant headerActions specified as part of this backendServiceWeight. + returned: success + type: dict + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . + - The selection of a backend service is determined only for new + traffic. Once a user's request has been directed to a backendService, + subsequent requests will be sent to the same backendService as + determined by the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + headerAction: + description: + - Specifies changes to request and response headers that need to + take effect for the selected backendService. + - headerAction specified here take effect before headerAction in + the enclosing HttpRouteRule, PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the backendService. + returned: success + type: list + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the + request to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header to add. + returned: success + type: str + headerValue: + description: + - The value of the header to add. 
+ returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to the + client. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header to add. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the + request to the matched service. + returned: success + type: complex + contains: + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by pathPrefixRewrite. + - The value must be between 1 and 1024 characters. + returned: success + type: str + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. + - The value must be between 1 and 255 characters. + returned: success + type: str + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request has been fully processed (i.e. 
end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. + - If not specified, will use the largest timeout among all backend services + associated with the route. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + retryConditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: * 5xx: Loadbalancer will attempt a retry if + the backend service responds with any 5xx response code, or if + the backend service does not respond at all, example: disconnects, + reset, read timeout, connection failure, and refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx response + codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend service + resets the stream with a REFUSED_STREAM error code." + - This reset type indicates that it is safe to retry. 
+ - "* cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled * deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded * resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header is set + to resource-exhausted * unavailable: Loadbalancer will retry if + the gRPC status code in the response header is set to unavailable + ." + returned: success + type: list + numRetries: + description: + - Specifies the allowed number of retries. This number must be > 0. + If not specified, defaults to 1. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + - If not specified, will use the timeout set in HttpRouteAction. + If timeout in HttpRouteAction is not set, will use the largest + timeout among all backend services associated with the route. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. + - Loadbalancer does not wait for responses from the shadow service. + Prior to sending traffic to the shadow service, the host / authority + header is suffixed with -shadow. 
+ returned: success + type: complex + contains: + backendService: + description: + - The full or partial URL to the BackendService resource being mirrored + to. + returned: success + type: dict + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) + . + returned: success + type: complex + contains: + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. + - An origin is allowed if it matches either an item in allowOrigins + or an item in allowOriginRegexes. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed origins. + For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either an item in allowOrigins + or an item in allowOriginRegexes. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. + returned: success + type: list + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + returned: success + type: list + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + returned: success + type: list + maxAge: + description: + - Specifies how long results of a preflight request can be cached + in seconds. + - This translates to the Access-Control-Max-Age header. + returned: success + type: int + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. + - This translates to the Access-Control-Allow-Credentials header. + returned: success + type: bool + disabled: + description: + - If true, specifies the CORS policy is disabled. 
The default value + is false, which indicates that the CORS policy is in effect. + returned: success + type: bool + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic to test + the resiliency of clients to backend service failure. + - As part of fault injection, when clients send requests to a backend + service, delays can be introduced by Loadbalancer on a percentage + of requests before sending those request to the backend service. Similarly + requests from clients can be aborted by the Loadbalancer for a percentage + of requests. + - timeout and retryPolicy will be ignored by clients that are configured + with a faultInjectionPolicy. + returned: success + type: complex + contains: + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 + min/hr * 24 hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. 
+ returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. + - The value must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + tests: + description: + - The list of expected URL mapping tests. Request to update this UrlMap will + succeed only if all of the test cases pass. You can specify a maximum of 100 + tests per UrlMap. + returned: success + type: complex + contains: + description: + description: + - Description of this test case. + returned: success + type: str + host: + description: + - Host portion of the URL. + returned: success + type: str + path: + description: + - Path portion of the URL. + returned: success + type: str + service: + description: + - Expected BackendService resource the given URL should be mapped to. + returned: success + type: dict + defaultUrlRedirect: + description: + - When none of the specified hostRules match, the request is redirected to a + URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService + or defaultRouteAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of the one + that was supplied in the request. The value must be between 1 and 255 + characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain + the same as that of the request. This must only be set for UrlMaps used + in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. + The default is set to false. 
+ returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of the one + that was supplied in the request. pathRedirect cannot be supplied together + with prefixRedirect. Supply one alone or neither. If neither is supplied, + the path of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - prefixRedirect cannot be supplied together with pathRedirect. Supply one + alone or neither. If neither is supplied, the path of the original request + will be used for the redirect. The value must be between 1 and 1024 characters. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported values + are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds + to 301.' + - "* FOUND, which corresponds to 302." + - "* SEE_OTHER which corresponds to 303." + - "* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request + method will be retained." + - "* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request + method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original URL is + removed prior to redirecting the request. If set to false, the query portion + of the original URL is retained. The default is set to false. + returned: success + type: bool + defaultRouteAction: + description: + - defaultRouteAction takes effect when none of the hostRules match. The load + balancer performs advanced routing actions like URL rewrites, header transformations, + etc. prior to forwarding the request to the selected backend. 
+ - If defaultRouteAction specifies any weightedBackendServices, defaultService + must not be set. Conversely if defaultService is set, defaultRouteAction cannot + contain any weightedBackendServices. + - Only one of defaultRouteAction or defaultUrlRedirect must be set. + returned: success + type: complex + contains: + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a route match + occurs. + - The weights determine the fraction of traffic that flows to their corresponding + backend service. + - If all traffic needs to go to a single backend service, there must be + one weightedBackendService with weight set to a non 0 number. + - Once a backendService is identified and before forwarding the request + to the backend service, advanced routing actions like Url rewrites and + header transformations are applied depending on additional settings specified + in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The full or partial URL to the default BackendService resource. Before + forwarding the request to backendService, the loadbalancer applies + any relevant headerActions specified as part of this backendServiceWeight. + returned: success + type: dict + weight: + description: + - Specifies the fraction of traffic sent to backendService, computed + as weight / (sum of all weightedBackendService weights in routeAction) + . + - The selection of a backend service is determined only for new traffic. + Once a user's request has been directed to a backendService, subsequent + requests will be sent to the same backendService as determined by + the BackendService's session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + headerAction: + description: + - Specifies changes to request and response headers that need to take + effect for the selected backendService. 
+ - headerAction specified here take effect before headerAction in the + enclosing HttpRouteRule, PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the request prior to forwarding the request to the backendService. + returned: success + type: list + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header to add. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding any + values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the response prior to sending the response back to the client. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back + to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header to add. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. + - If true, headerValue is set for the header, discarding any + values that were set for that header. + returned: success + type: bool + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding the request + to the matched service. 
+ returned: success + type: complex + contains: + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, the + matching portion of the request's path is replaced by pathPrefixRewrite. + - The value must be between 1 and 1024 characters. + returned: success + type: str + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the request's + host header is replaced with contents of hostRewrite. + - The value must be between 1 and 255 characters. + returned: success + type: str + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed from + the time the request has been fully processed (i.e. end-of-stream) up + until the response has been completely processed. Timeout includes all + retries. + - If not specified, will use the largest timeout among all backend services + associated with the route. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 + hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds field + and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + retryConditions: + description: + - 'Specfies one or more conditions when this retry rule applies. 
Valid + values are: * 5xx: Loadbalancer will attempt a retry if the backend + service responds with any 5xx response code, or if the backend service + does not respond at all, example: disconnects, reset, read timeout, + * connection failure, and refused streams.' + - "* gateway-error: Similar to 5xx, but only applies to response codes + 502, 503 or 504." + - "* connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "* retriable-4xx: Loadbalancer will retry for retriable 4xx response + codes." + - Currently the only retriable error supported is 409. + - "* refused-stream: Loadbalancer will retry if the backend service resets + the stream with a REFUSED_STREAM error code." + - This reset type indicates that it is safe to retry. + - "* cancelled: Loadbalancer will retry if the gRPC status code in the + response header is set to cancelled * deadline-exceeded: Loadbalancer + will retry if the gRPC status code in the response header is set to + deadline-exceeded * resource-exhausted: Loadbalancer will retry if + the gRPC status code in the response header is set to resource-exhausted + * unavailable: Loadbalancer will retry if the gRPC status code in + the response header is set to unavailable ." + returned: success + type: list + numRetries: + description: + - Specifies the allowed number of retries. This number must be > 0. If + not specified, defaults to 1. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + - If not specified, will use the timeout set in HttpRouteAction. If + timeout in HttpRouteAction is not set, will use the largest timeout + among all backend services associated with the route. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. 
+ - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's backends + are shadowed to a separate mirrored backend service. + - Loadbalancer does not wait for responses from the shadow service. Prior + to sending traffic to the shadow service, the host / authority header + is suffixed with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The full or partial URL to the BackendService resource being mirrored + to. + returned: success + type: dict + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. Please + see [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) + . + returned: success + type: complex + contains: + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS requests. + - An origin is allowed if it matches either an item in allowOrigins + or an item in allowOriginRegexes. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed origins. + For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either an item in allowOrigins + or an item in allowOriginRegexes. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods header. 
+ returned: success + type: list + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers header. + returned: success + type: list + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers header. + returned: success + type: list + maxAge: + description: + - Specifies how long results of a preflight request can be cached in + seconds. + - This translates to the Access-Control-Max-Age header. + returned: success + type: int + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. + - This translates to the Access-Control-Allow-Credentials header. + returned: success + type: bool + disabled: + description: + - If true, specifies the CORS policy is disabled. The default value + is false, which indicates that the CORS policy is in effect. + returned: success + type: bool + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic to test + the resiliency of clients to backend service failure. + - As part of fault injection, when clients send requests to a backend service, + delays can be introduced by Loadbalancer on a percentage of requests before + sending those request to the backend service. Similarly requests from + clients can be aborted by the Loadbalancer for a percentage of requests. + - timeout and retryPolicy will be ignored by clients that are configured + with a faultInjectionPolicy. + returned: success + type: complex + contains: + delay: + description: + - The specification for how client requests are delayed as part of fault + injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + seconds: + description: + - Span of time at a resolution of a second. 
Must be from 0 to + 315,576,000,000 inclusive. + - 'Note: these bounds are computed from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year * 10000 years .' + returned: success + type: str + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 seconds + field and a positive nanos field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) on + which delay will be introduced as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + abort: + description: + - The specification for how client requests are aborted as part of fault + injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. + - The value must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) which + will be aborted as part of fault injection. + - The value must be between 0.0 and 100.0 inclusive. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/global/urlMaps".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_vpn_tunnel.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_vpn_tunnel.py new file mode 100644 index 000000000..60705a64c --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_vpn_tunnel.py @@ -0,0 +1,612 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_vpn_tunnel +description: +- VPN tunnel resource. 
+short_description: Creates a GCP VpnTunnel +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must + be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + required: true + type: str + description: + description: + - An optional description of this resource. + required: false + type: str + target_vpn_gateway: + description: + - URL of the Target VPN gateway with which this VPN tunnel is associated. + - 'This field represents a link to a TargetVpnGateway resource in GCP. It can + be specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_target_vpn_gateway task and then set this + target_vpn_gateway field to "{{ name-of-resource }}"' + required: false + type: dict + vpn_gateway: + description: + - URL of the VPN gateway with which this VPN tunnel is associated. + - This must be used if a High Availability VPN gateway resource is created. + - 'This field represents a link to a VpnGateway resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_vpn_gateway task and then set this vpn_gateway field to "{{ + name-of-resource }}"' + required: false + type: dict + vpn_gateway_interface: + description: + - The interface ID of the VPN gateway with which this VPN tunnel is associated. + required: false + type: int + peer_external_gateway: + description: + - URL of the peer side external VPN gateway to which this VPN tunnel is connected. + - 'This field represents a link to a ExternalVpnGateway resource in GCP. It can + be specified in two ways. First, you can place a dictionary with key ''selfLink'' + and value of your resource''s selfLink Alternatively, you can add `register: + name-of-resource` to a gcp_compute_external_vpn_gateway task and then set this + peer_external_gateway field to "{{ name-of-resource }}"' + required: false + type: dict + peer_external_gateway_interface: + description: + - The interface ID of the external VPN gateway to which this VPN tunnel is connected. + required: false + type: int + peer_gcp_gateway: + description: + - URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. + - If provided, the VPN tunnel will automatically use the same vpn_gateway_interface + ID in the peer GCP VPN gateway. + - 'This field represents a link to a VpnGateway resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_vpn_gateway task and then set this peer_gcp_gateway field to + "{{ name-of-resource }}"' + required: false + type: dict + router: + description: + - URL of router resource to be used for dynamic routing. + - 'This field represents a link to a Router resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''selfLink'' and value + of your resource''s selfLink Alternatively, you can add `register: name-of-resource` + to a gcp_compute_router task and then set this router field to "{{ name-of-resource + }}"' + required: false + type: dict + peer_ip: + description: + - IP address of the peer VPN gateway. Only IPv4 is supported. + required: false + type: str + shared_secret: + description: + - Shared secret used to set the secure session between the Cloud VPN gateway and + the peer VPN gateway. + required: true + type: str + ike_version: + description: + - IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. + - Acceptable IKE versions are 1 or 2. Default version is 2. + required: false + default: '2' + type: int + local_traffic_selector: + description: + - Local traffic selector to use when establishing the VPN tunnel with peer VPN + gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. + The ranges should be disjoint. + - Only IPv4 is supported. + elements: str + required: false + type: list + remote_traffic_selector: + description: + - Remote traffic selector to use when establishing the VPN tunnel with peer VPN + gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. + The ranges should be disjoint. + - Only IPv4 is supported. + elements: str + required: false + type: list + region: + description: + - The region where the tunnel is located. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels)' +- 'Cloud VPN Overview: U(https://cloud.google.com/vpn/docs/concepts/overview)' +- 'Networks and Tunnel Routing: U(https://cloud.google.com/vpn/docs/concepts/choosing-networks-routing)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-vpn-tunnel + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a router + google.cloud.gcp_compute_router: + name: router-vpn-tunnel + network: "{{ network }}" + bgp: + asn: 64514 + advertise_mode: CUSTOM + advertised_groups: + - ALL_SUBNETS + advertised_ip_ranges: + - range: 1.2.3.4 + - range: 6.7.0.0/16 + region: us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: router + +- name: create a target vpn gateway + google.cloud.gcp_compute_target_vpn_gateway: + name: gateway-vpn-tunnel + region: us-west1 + network: "{{ network }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: gateway + +- name: create a vpn tunnel + google.cloud.gcp_compute_vpn_tunnel: + name: test_object + region: us-west1 + target_vpn_gateway: "{{ gateway }}" + router: "{{ router }}" + shared_secret: super secret + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +id: + description: + - The unique identifier for the resource. This identifier is defined by the server. + returned: success + type: str +creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str +name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. 
+ Specifically, the name must be 1-63 characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase + letter, and all following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + returned: success + type: str +description: + description: + - An optional description of this resource. + returned: success + type: str +targetVpnGateway: + description: + - URL of the Target VPN gateway with which this VPN tunnel is associated. + returned: success + type: dict +vpnGateway: + description: + - URL of the VPN gateway with which this VPN tunnel is associated. + - This must be used if a High Availability VPN gateway resource is created. + returned: success + type: dict +vpnGatewayInterface: + description: + - The interface ID of the VPN gateway with which this VPN tunnel is associated. + returned: success + type: int +peerExternalGateway: + description: + - URL of the peer side external VPN gateway to which this VPN tunnel is connected. + returned: success + type: dict +peerExternalGatewayInterface: + description: + - The interface ID of the external VPN gateway to which this VPN tunnel is connected. + returned: success + type: int +peerGcpGateway: + description: + - URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. + - If provided, the VPN tunnel will automatically use the same vpn_gateway_interface + ID in the peer GCP VPN gateway. + returned: success + type: dict +router: + description: + - URL of router resource to be used for dynamic routing. + returned: success + type: dict +peerIp: + description: + - IP address of the peer VPN gateway. Only IPv4 is supported. + returned: success + type: str +sharedSecret: + description: + - Shared secret used to set the secure session between the Cloud VPN gateway and + the peer VPN gateway. 
+ returned: success + type: str +sharedSecretHash: + description: + - Hash of the shared secret. + returned: success + type: str +ikeVersion: + description: + - IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. + - Acceptable IKE versions are 1 or 2. Default version is 2. + returned: success + type: int +localTrafficSelector: + description: + - Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. + The value should be a CIDR formatted string, for example `192.168.0.0/16`. The + ranges should be disjoint. + - Only IPv4 is supported. + returned: success + type: list +remoteTrafficSelector: + description: + - Remote traffic selector to use when establishing the VPN tunnel with peer VPN + gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. + The ranges should be disjoint. + - Only IPv4 is supported. + returned: success + type: list +region: + description: + - The region where the tunnel is located. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + target_vpn_gateway=dict(type='dict'), + vpn_gateway=dict(type='dict'), + vpn_gateway_interface=dict(type='int'), + peer_external_gateway=dict(type='dict'), + peer_external_gateway_interface=dict(type='int'), + peer_gcp_gateway=dict(type='dict'), + router=dict(type='dict'), + peer_ip=dict(type='str'), + shared_secret=dict(required=True, type='str', no_log=True), + ike_version=dict(default=2, type='int'), + local_traffic_selector=dict(type='list', elements='str'), + remote_traffic_selector=dict(type='list', elements='str'), + region=dict(required=True, type='str'), + ), + mutually_exclusive=[['peer_external_gateway', 'peer_gcp_gateway']], + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + state = module.params['state'] + kind = 'compute#vpnTunnel' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 
'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + delete(module, self_link(module), kind) + create(module, collection(module), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'compute') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'compute#vpnTunnel', + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'targetVpnGateway': replace_resource_dict(module.params.get(u'target_vpn_gateway', {}), 'selfLink'), + u'vpnGateway': replace_resource_dict(module.params.get(u'vpn_gateway', {}), 'selfLink'), + u'vpnGatewayInterface': module.params.get('vpn_gateway_interface'), + u'peerExternalGateway': replace_resource_dict(module.params.get(u'peer_external_gateway', {}), 'selfLink'), + u'peerExternalGatewayInterface': module.params.get('peer_external_gateway_interface'), + u'peerGcpGateway': replace_resource_dict(module.params.get(u'peer_gcp_gateway', {}), 'selfLink'), + u'router': replace_resource_dict(module.params.get(u'router', {}), 'selfLink'), + u'peerIp': module.params.get('peer_ip'), + u'sharedSecret': module.params.get('shared_secret'), + u'ikeVersion': module.params.get('ike_version'), + u'localTrafficSelector': module.params.get('local_traffic_selector'), + u'remoteTrafficSelector': module.params.get('remote_traffic_selector'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'compute') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + 
def self_link(module):
    """Return the canonical REST URL of this module's VPN tunnel resource."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/vpnTunnels/{name}".format(**module.params)


def collection(module):
    """Return the REST URL of the regional vpnTunnels collection."""
    return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/vpnTunnels".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response, failing the module on API-level errors.

    Returns None for a 404 (when allow_not_found is set) or a 204
    (no content) response; otherwise returns the decoded JSON body.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True if the live resource differs from the requested state."""
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    # shared_secret is returned with stars instead of the
    # actual secret, so it cannot be meaningfully compared.
    # BUG FIX: the trailing comma is required -- a bare ("sharedSecret")
    # is just a string, which turned the `k in keys_to_ignore` membership
    # tests below into substring matches instead of exact key matches.
    keys_to_ignore = ("sharedSecret",)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in keys_to_ignore:
            continue
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in keys_to_ignore:
            continue
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response): + return { + u'id': response.get(u'id'), + u'creationTimestamp': response.get(u'creationTimestamp'), + u'name': response.get(u'name'), + u'description': module.params.get('description'), + u'targetVpnGateway': replace_resource_dict(module.params.get(u'target_vpn_gateway', {}), 'selfLink'), + u'vpnGateway': replace_resource_dict(module.params.get(u'vpn_gateway', {}), 'selfLink'), + u'vpnGatewayInterface': module.params.get('vpn_gateway_interface'), + u'peerExternalGateway': replace_resource_dict(module.params.get(u'peer_external_gateway', {}), 'selfLink'), + u'peerExternalGatewayInterface': response.get(u'peerExternalGatewayInterface'), + u'peerGcpGateway': response.get(u'peerGcpGateway'), + u'router': replace_resource_dict(module.params.get(u'router', {}), 'selfLink'), + u'peerIp': response.get(u'peerIp'), + u'sharedSecret': response.get(u'sharedSecret'), + u'sharedSecretHash': response.get(u'sharedSecretHash'), + u'ikeVersion': response.get(u'ikeVersion'), + u'localTrafficSelector': response.get(u'localTrafficSelector'), + u'remoteTrafficSelector': response.get(u'remoteTrafficSelector'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'compute#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#vpnTunnel') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + 
raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'compute#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_compute_vpn_tunnel_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_compute_vpn_tunnel_info.py new file mode 100644 index 000000000..e0ee1f9ed --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_compute_vpn_tunnel_info.py @@ -0,0 +1,301 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_vpn_tunnel_info +description: +- Gather info for GCP VpnTunnel +short_description: Gather info for GCP VpnTunnel +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + elements: str + region: + description: + - The region where the tunnel is located. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a vpn tunnel + gcp_compute_vpn_tunnel_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with + RFC1035. 
Specifically, the name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + targetVpnGateway: + description: + - URL of the Target VPN gateway with which this VPN tunnel is associated. + returned: success + type: dict + vpnGateway: + description: + - URL of the VPN gateway with which this VPN tunnel is associated. + - This must be used if a High Availability VPN gateway resource is created. + returned: success + type: dict + vpnGatewayInterface: + description: + - The interface ID of the VPN gateway with which this VPN tunnel is associated. + returned: success + type: int + peerExternalGateway: + description: + - URL of the peer side external VPN gateway to which this VPN tunnel is connected. + returned: success + type: dict + peerExternalGatewayInterface: + description: + - The interface ID of the external VPN gateway to which this VPN tunnel is connected. + returned: success + type: int + peerGcpGateway: + description: + - URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. + - If provided, the VPN tunnel will automatically use the same vpn_gateway_interface + ID in the peer GCP VPN gateway. + returned: success + type: dict + router: + description: + - URL of router resource to be used for dynamic routing. + returned: success + type: dict + peerIp: + description: + - IP address of the peer VPN gateway. Only IPv4 is supported. + returned: success + type: str + sharedSecret: + description: + - Shared secret used to set the secure session between the Cloud VPN gateway + and the peer VPN gateway. 
+ returned: success + type: str + sharedSecretHash: + description: + - Hash of the shared secret. + returned: success + type: str + ikeVersion: + description: + - IKE protocol version to use when establishing the VPN tunnel with peer VPN + gateway. + - Acceptable IKE versions are 1 or 2. Default version is 2. + returned: success + type: int + localTrafficSelector: + description: + - Local traffic selector to use when establishing the VPN tunnel with peer VPN + gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. + The ranges should be disjoint. + - Only IPv4 is supported. + returned: success + type: list + remoteTrafficSelector: + description: + - Remote traffic selector to use when establishing the VPN tunnel with peer + VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. + The ranges should be disjoint. + - Only IPv4 is supported. + returned: success + type: list + region: + description: + - The region where the tunnel is located. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/vpnTunnels".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_container_cluster.py b/ansible_collections/google/cloud/plugins/modules/gcp_container_cluster.py new file mode 100644 index 000000000..968dfb3ad --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_container_cluster.py @@ -0,0 +1,2435 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_container_cluster +description: +- A Google Container Engine cluster. 
+short_description: Creates a GCP Cluster +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The name of this cluster. The name must be unique within this project and location, + and can be up to 40 characters. Must be Lowercase letters, numbers, and hyphens + only. Must start with a letter. Must end with a number or a letter. + required: false + type: str + description: + description: + - An optional description of this cluster. + required: false + type: str + initial_node_count: + description: + - The number of nodes to create in this cluster. You must ensure that your Compute + Engine resource quota is sufficient for this number of instances. You must also + have available firewall and routes quota. For requests, this field should only + be used in lieu of a "nodePool" object, since this configuration (along with + the "nodeConfig") will be used to create a "NodePool" object with an auto-generated + name. Do not use this and a nodePool at the same time. + - This field has been deprecated. Please use nodePool.initial_node_count instead. + required: false + type: int + node_config: + description: + - Parameters used in creating the cluster's nodes. + - For requests, this field should only be used in lieu of a "nodePool" object, + since this configuration (along with the "initialNodeCount") will be used to + create a "NodePool" object with an auto-generated name. Do not use this and + a nodePool at the same time. For responses, this field will be populated with + the node configuration of the first node pool. If unspecified, the defaults + are used. + required: false + type: dict + suboptions: + machine_type: + description: + - The name of a Google Compute Engine machine type (e.g. + - n1-standard-1). 
If unspecified, the default machine type is n1-standard-1. + required: false + type: str + disk_size_gb: + description: + - Size of the disk attached to each node, specified in GB. The smallest allowed + disk size is 10GB. If unspecified, the default disk size is 100GB. + required: false + type: int + oauth_scopes: + description: + - The set of Google API scopes to be made available on all of the node VMs + under the "default" service account. + - 'The following scopes are recommended, but not required, and by default + are not included: U(https://www.googleapis.com/auth/compute) is required + for mounting persistent storage on your nodes.' + - U(https://www.googleapis.com/auth/devstorage.read_only) is required for + communicating with gcr.io (the Google Container Registry). + - If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring + are enabled, in which case their required scopes will be added. + elements: str + required: false + type: list + service_account: + description: + - The Google Cloud Platform Service Account to be used by the node VMs. If + no Service Account is specified, the "default" service account is used. + required: false + type: str + metadata: + description: + - The metadata key/value pairs assigned to instances in the cluster. + - 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + in length. These are reflected as part of a URL in the metadata server. + Additionally, to avoid ambiguity, keys must not conflict with any other + metadata keys for the project or be one of the four reserved keys: "instance-template", + "kube-env", "startup-script", and "user-data" Values are free-form strings, + and only have meaning as interpreted by the image running in the instance. + The only restriction placed on them is that each value''s size must be less + than or equal to 32 KB.' + - The total size of all keys and values must be less than 512 KB. + - 'An object containing a list of "key": value pairs.' 
+ - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + required: false + type: dict + image_type: + description: + - The image type to use for this node. Note that for a given image type, the + latest version of it will be used. + required: false + type: str + labels: + description: + - 'The map of Kubernetes labels (key/value pairs) to be applied to each node. + These will added in addition to any default label(s) that Kubernetes may + apply to the node. In case of conflict in label keys, the applied set may + differ depending on the Kubernetes version -- it''s best to assume the behavior + is undefined and conflicts should be avoided. For more information, including + usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html) + An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + required: false + type: dict + local_ssd_count: + description: + - The number of local SSD disks to be attached to the node. + - 'The limit for this value is dependant upon the maximum number of disks + available on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits) + for more information.' + required: false + type: int + tags: + description: + - The list of instance tags applied to all nodes. Tags are used to identify + valid sources or targets for network firewalls and are specified by the + client during cluster or node pool creation. Each tag within the list must + comply with RFC1035. + elements: str + required: false + type: list + preemptible: + description: + - 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible) + for more information about preemptible VM instances.' + required: false + type: bool + accelerators: + description: + - A list of hardware accelerators to be attached to each node. 
See U(https://cloud.google.com/compute/docs/gpus) + for more information about support for GPUs. + elements: dict + required: false + type: list + suboptions: + accelerator_count: + description: + - The number of accelerator cards exposed to an instance. + required: false + type: str + accelerator_type: + description: + - The accelerator type resource name. + required: false + type: str + disk_type: + description: + - Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') + If unspecified, the default disk type is 'pd-standard' . + required: false + type: str + min_cpu_platform: + description: + - Minimum CPU platform to be used by this instance. The instance may be scheduled + on the specified or newer CPU platform. + required: false + type: str + taints: + description: + - List of kubernetes taints to be applied to each node. + - 'For more information, including usage and the valid values, see: U(https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) + .' + elements: dict + required: false + type: list + suboptions: + key: + description: + - Key for taint. + required: false + type: str + value: + description: + - Value for taint. + required: false + type: str + effect: + description: + - Effect for taint. + - 'Some valid choices include: "EFFECT_UNSPECIFIED", "NO_SCHEDULE", "PREFER_NO_SCHEDULE", + "NO_EXECUTE"' + required: false + type: str + shielded_instance_config: + description: + - Shielded Instance options. + required: false + type: dict + suboptions: + enable_secure_boot: + description: + - Defines whether the instance has Secure Boot enabled. + - Secure Boot helps ensure that the system only runs authentic software + by verifying the digital signature of all boot components, and halting + the boot process if signature verification fails. + required: false + type: bool + enable_integrity_monitoring: + description: + - Defines whether the instance has integrity monitoring enabled. 
+ - Enables monitoring and attestation of the boot integrity of the instance. + - The attestation is performed against the integrity policy baseline. + This baseline is initially derived from the implicitly trusted boot + image when the instance is created. + required: false + type: bool + master_auth: + description: + - The authentication information for accessing the master endpoint. + required: false + type: dict + suboptions: + username: + description: + - The username to use for HTTP basic authentication to the master endpoint. + (unsupported with GKE >= 1.19). + required: false + type: str + password: + description: + - The password to use for HTTP basic authentication to the master endpoint. + Because the master endpoint is open to the Internet, you should create a + strong password with a minimum of 16 characters. + (unsupported with GKE >= 1.19). + required: false + type: str + client_certificate_config: + description: + - Configuration for client certificate authentication on the cluster. For + clusters before v1.12, if no configuration is specified, a client certificate + is issued. + required: false + type: dict + suboptions: + issue_client_certificate: + description: + - Issue a client certificate. + required: false + type: bool + logging_service: + description: + - 'The logging service the cluster should use to write logs. Currently available + options: logging.googleapis.com - the Google Cloud Logging service.' + - none - no logs will be exported from the cluster. + - if left as an empty string,logging.googleapis.com will be used. + - 'Some valid choices include: "logging.googleapis.com", "none"' + required: false + type: str + monitoring_service: + description: + - The monitoring service the cluster should use to write metrics. + - 'Currently available options: monitoring.googleapis.com - the Google Cloud Monitoring + service.' + - none - no metrics will be exported from the cluster. 
+ - if left as an empty string, monitoring.googleapis.com will be used. + - 'Some valid choices include: "monitoring.googleapis.com", "none"' + required: false + type: str + network: + description: + - The name of the Google Compute Engine network to which the cluster is connected. + If left unspecified, the default network will be used. + required: false + type: str + database_encryption: + description: + - Configuration of etcd encryption. + required: false + type: dict + suboptions: + state: + description: + - Denotes the state of etcd encryption. + - 'Some valid choices include: "ENCRYPTED", "DECRYPTED"' + required: false + type: str + key_name: + description: + - Name of CloudKMS key to use for the encryption of secrets in etcd. Ex. + - "`projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key` + ." + required: false + type: str + private_cluster_config: + description: + - Configuration for a private cluster. + required: false + type: dict + suboptions: + enable_private_nodes: + description: + - Whether nodes have internal IP addresses only. If enabled, all nodes are + given only RFC 1918 private addresses and communicate with the master via + private networking. + required: false + type: bool + enable_private_endpoint: + description: + - Whether the master's internal IP address is used as the cluster endpoint. + required: false + type: bool + master_ipv4_cidr_block: + description: + - The IP range in CIDR notation to use for the hosted master network. This + range will be used for assigning internal IP addresses to the master or + set of masters, as well as the ILB VIP. This range must not overlap with + any other ranges in use within the cluster's network. + required: false + type: str + cluster_ipv4_cidr: + description: + - The IP address range of the container pods in this cluster, in CIDR notation + (e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify + a /14 block in 10.0.0.0/8. 
+ required: false + type: str + enable_tpu: + description: + - "(Optional) Whether to enable Cloud TPU resources in this cluster." + - See the official documentation - U(https://cloud.google.com/tpu/docs/kubernetes-engine-setup) + . + required: false + type: bool + addons_config: + description: + - Configurations for the various addons available to run in the cluster. + required: false + type: dict + suboptions: + http_load_balancing: + description: + - Configuration for the HTTP (L7) load balancing controller addon, which makes + it easy to set up HTTP load balancers for services in a cluster. + required: false + type: dict + suboptions: + disabled: + description: + - Whether the HTTP Load Balancing controller is enabled in the cluster. + When enabled, it runs a small pod in the cluster that manages the load + balancers. + required: false + type: bool + horizontal_pod_autoscaling: + description: + - Configuration for the horizontal pod autoscaling feature, which increases + or decreases the number of replica pods a replication controller has based + on the resource usage of the existing pods. + required: false + type: dict + suboptions: + disabled: + description: + - Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. + When enabled, it ensures that a Heapster pod is running in the cluster, + which is also used by the Cloud Monitoring service. + required: false + type: bool + network_policy_config: + description: + - Configuration for NetworkPolicy. This only tracks whether the addon is enabled + or not on the Master, it does not track whether network policy is enabled + for the nodes. + required: false + type: dict + suboptions: + disabled: + description: + - Whether NetworkPolicy is enabled for this cluster. + required: false + type: bool + subnetwork: + description: + - The name of the Google Compute Engine subnetwork to which the cluster is connected. 
+ required: false + type: str + locations: + description: + - The list of Google Compute Engine zones in which the cluster's nodes should + be located. + elements: str + required: false + type: list + aliases: + - nodeLocations + resource_labels: + description: + - The resource labels for the cluster to use to annotate any related Google Compute + Engine resources. + required: false + type: dict + legacy_abac: + description: + - Configuration for the legacy ABAC authorization mode. + required: false + type: dict + suboptions: + enabled: + description: + - Whether the ABAC authorizer is enabled for this cluster. When enabled, identities + in the system, including service accounts, nodes, and controllers, will + have statically granted permissions beyond those provided by the RBAC configuration + or IAM. + required: false + type: bool + network_policy: + description: + - Configuration options for the NetworkPolicy feature. + required: false + type: dict + suboptions: + provider: + description: + - The selected network policy provider. + - 'Some valid choices include: "PROVIDER_UNSPECIFIED", "CALICO"' + required: false + type: str + enabled: + description: + - Whether network policy is enabled on the cluster. + required: false + type: bool + default_max_pods_constraint: + description: + - The default constraint on the maximum number of pods that can be run simultaneously + on a node in the node pool of this cluster. + - Only honored if cluster created with IP Alias support. + required: false + type: dict + suboptions: + max_pods_per_node: + description: + - Constraint enforced on the max num of pods per node. + required: false + type: str + ip_allocation_policy: + description: + - Configuration for controlling how IPs are allocated in the cluster. + required: false + type: dict + suboptions: + use_ip_aliases: + description: + - Whether alias IPs will be used for pod IPs in the cluster. 
+ required: false + type: bool + create_subnetwork: + description: + - Whether a new subnetwork will be created automatically for the cluster. + required: false + type: bool + subnetwork_name: + description: + - A custom subnetwork name to be used if createSubnetwork is true. + - If this field is empty, then an automatic name will be chosen for the new + subnetwork. + required: false + type: str + cluster_secondary_range_name: + description: + - The name of the secondary range to be used for the cluster CIDR block. The + secondary range will be used for pod IP addresses. + - This must be an existing secondary range associated with the cluster subnetwork + . + required: false + type: str + services_secondary_range_name: + description: + - The name of the secondary range to be used as for the services CIDR block. + The secondary range will be used for service ClusterIPs. This must be an + existing secondary range associated with the cluster subnetwork. + required: false + type: str + cluster_ipv4_cidr_block: + description: + - The IP address range for the cluster pod IPs. If this field is set, then + cluster.cluster_ipv4_cidr must be left blank. + - This field is only applicable when useIpAliases is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + required: false + type: str + node_ipv4_cidr_block: + description: + - The IP address range of the instance IPs in this cluster. + - This is applicable only if createSubnetwork is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + required: false + type: str + services_ipv4_cidr_block: + description: + - The IP address range of the services IPs in this cluster. If blank, a range + will be automatically chosen with the default size. + - This field is only applicable when useIpAliases is true. 
+ - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + required: false + type: str + tpu_ipv4_cidr_block: + description: + - The IP address range of the Cloud TPUs in this cluster. If unspecified, + a range will be automatically chosen with the default size. + - This field is only applicable when useIpAliases is true. + - If unspecified, the range will use the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + required: false + type: str + initial_cluster_version: + description: + - The software version of the master endpoint and kubelets used in the cluster + when it was first created. The version can be upgraded over time. + required: false + type: str + master_authorized_networks_config: + description: + - Configuration for controlling how IPs are allocated in the cluster. + required: false + type: dict + suboptions: + enabled: + description: + - Whether or not master authorized networks is enabled. + required: false + type: bool + cidr_blocks: + description: + - Define up to 50 external networks that could access Kubernetes master through + HTTPS. + elements: dict + required: false + type: list + suboptions: + display_name: + description: + - Optional field used to identify cidr blocks. + required: false + type: str + cidr_block: + description: + - Block specified in CIDR notation. + required: false + type: str + binary_authorization: + description: + - Configuration for the BinaryAuthorization feature. + required: false + type: dict + suboptions: + enabled: + description: + - If enabled, all container images will be validated by Binary Authorization. + required: false + type: bool + release_channel: + description: + - ReleaseChannel indicates which release channel a cluster is subscribed to. + - Release channels are arranged in order of risk and frequency of updates. 
+ required: false + type: dict + suboptions: + channel: + description: + - Which release channel the cluster is subscribed to. + - 'Some valid choices include: "UNSPECIFIED", "RAPID", "REGULAR", "STABLE"' + required: false + type: str + shielded_nodes: + description: + - Shielded Nodes configuration. + required: false + type: dict + suboptions: + enabled: + description: + - Whether Shielded Nodes features are enabled on all nodes in this cluster. + required: false + type: bool + network_config: + description: + - Network configurations . + required: false + type: dict + suboptions: + enable_intra_node_visibility: + description: + - Whether Intra-node visibility is enabled for this cluster. This makes same + node pod to pod traffic visible for VPC network. + required: false + type: bool + default_snat_status: + description: + - Whether the cluster disables default in-node sNAT rules. In-node sNAT rules + will be disabled when defaultSnatStatus is disabled. + required: false + type: bool + enable_kubernetes_alpha: + description: + - Kubernetes alpha features are enabled on this cluster. This includes alpha API + groups (e.g. v1alpha1) and features that may not be production ready in the + kubernetes version of the master and nodes. + required: false + type: bool + location: + description: + - The location where the cluster is deployed. + required: true + type: str + aliases: + - zone + kubectl_path: + description: + - The path that the kubectl config file will be written to. + - The file will not be created if this path is unset. + - Any existing file at this path will be completely overwritten. + - This requires the PyYaml library. + required: false + type: str + kubectl_context: + description: + - The name of the context for the kubectl config file. Will default to the cluster + name. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a cluster + google.cloud.gcp_container_cluster: + name: my-cluster + initial_node_count: 2 + node_config: + machine_type: n1-standard-4 + disk_size_gb: 500 + location: us-central1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The name of this cluster. The name must be unique within this project and location, + and can be up to 40 characters. Must be Lowercase letters, numbers, and hyphens + only. Must start with a letter. Must end with a number or a letter. + returned: success + type: str +description: + description: + - An optional description of this cluster. + returned: success + type: str +initialNodeCount: + description: + - The number of nodes to create in this cluster. You must ensure that your Compute + Engine resource quota is sufficient for this number of instances. You must also + have available firewall and routes quota. 
For requests, this field should only + be used in lieu of a "nodePool" object, since this configuration (along with the + "nodeConfig") will be used to create a "NodePool" object with an auto-generated + name. Do not use this and a nodePool at the same time. + - This field has been deprecated. Please use nodePool.initial_node_count instead. + returned: success + type: int +nodeConfig: + description: + - Parameters used in creating the cluster's nodes. + - For requests, this field should only be used in lieu of a "nodePool" object, since + this configuration (along with the "initialNodeCount") will be used to create + a "NodePool" object with an auto-generated name. Do not use this and a nodePool + at the same time. For responses, this field will be populated with the node configuration + of the first node pool. If unspecified, the defaults are used. + returned: success + type: complex + contains: + machineType: + description: + - The name of a Google Compute Engine machine type (e.g. + - n1-standard-1). If unspecified, the default machine type is n1-standard-1. + returned: success + type: str + diskSizeGb: + description: + - Size of the disk attached to each node, specified in GB. The smallest allowed + disk size is 10GB. If unspecified, the default disk size is 100GB. + returned: success + type: int + oauthScopes: + description: + - The set of Google API scopes to be made available on all of the node VMs under + the "default" service account. + - 'The following scopes are recommended, but not required, and by default are + not included: U(https://www.googleapis.com/auth/compute) is required for mounting + persistent storage on your nodes.' + - U(https://www.googleapis.com/auth/devstorage.read_only) is required for communicating + with gcr.io (the Google Container Registry). + - If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring + are enabled, in which case their required scopes will be added. 
+ returned: success + type: list + serviceAccount: + description: + - The Google Cloud Platform Service Account to be used by the node VMs. If no + Service Account is specified, the "default" service account is used. + returned: success + type: str + metadata: + description: + - The metadata key/value pairs assigned to instances in the cluster. + - 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + in length. These are reflected as part of a URL in the metadata server. Additionally, + to avoid ambiguity, keys must not conflict with any other metadata keys for + the project or be one of the four reserved keys: "instance-template", "kube-env", + "startup-script", and "user-data" Values are free-form strings, and only have + meaning as interpreted by the image running in the instance. The only restriction + placed on them is that each value''s size must be less than or equal to 32 + KB.' + - The total size of all keys and values must be less than 512 KB. + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + imageType: + description: + - The image type to use for this node. Note that for a given image type, the + latest version of it will be used. + returned: success + type: str + labels: + description: + - 'The map of Kubernetes labels (key/value pairs) to be applied to each node. + These will added in addition to any default label(s) that Kubernetes may apply + to the node. In case of conflict in label keys, the applied set may differ + depending on the Kubernetes version -- it''s best to assume the behavior is + undefined and conflicts should be avoided. For more information, including + usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html) + An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' 
+ returned: success + type: dict + localSsdCount: + description: + - The number of local SSD disks to be attached to the node. + - 'The limit for this value is dependant upon the maximum number of disks available + on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits) + for more information.' + returned: success + type: int + tags: + description: + - The list of instance tags applied to all nodes. Tags are used to identify + valid sources or targets for network firewalls and are specified by the client + during cluster or node pool creation. Each tag within the list must comply + with RFC1035. + returned: success + type: list + preemptible: + description: + - 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible) + for more information about preemptible VM instances.' + returned: success + type: bool + accelerators: + description: + - A list of hardware accelerators to be attached to each node. See U(https://cloud.google.com/compute/docs/gpus) + for more information about support for GPUs. + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of accelerator cards exposed to an instance. + returned: success + type: str + acceleratorType: + description: + - The accelerator type resource name. + returned: success + type: str + diskType: + description: + - Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') If + unspecified, the default disk type is 'pd-standard' . + returned: success + type: str + minCpuPlatform: + description: + - Minimum CPU platform to be used by this instance. The instance may be scheduled + on the specified or newer CPU platform. + returned: success + type: str + taints: + description: + - List of kubernetes taints to be applied to each node. 
+ - 'For more information, including usage and the valid values, see: U(https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) + .' + returned: success + type: complex + contains: + key: + description: + - Key for taint. + returned: success + type: str + value: + description: + - Value for taint. + returned: success + type: str + effect: + description: + - Effect for taint. + returned: success + type: str + shieldedInstanceConfig: + description: + - Shielded Instance options. + returned: success + type: complex + contains: + enableSecureBoot: + description: + - Defines whether the instance has Secure Boot enabled. + - Secure Boot helps ensure that the system only runs authentic software + by verifying the digital signature of all boot components, and halting + the boot process if signature verification fails. + returned: success + type: bool + enableIntegrityMonitoring: + description: + - Defines whether the instance has integrity monitoring enabled. + - Enables monitoring and attestation of the boot integrity of the instance. + - The attestation is performed against the integrity policy baseline. This + baseline is initially derived from the implicitly trusted boot image when + the instance is created. + returned: success + type: bool +masterAuth: + description: + - The authentication information for accessing the master endpoint. + returned: success + type: complex + contains: + username: + description: + - The username to use for HTTP basic authentication to the master endpoint. + (unsupported with GKE >= 1.19). + returned: success + type: str + password: + description: + - The password to use for HTTP basic authentication to the master endpoint. + Because the master endpoint is open to the Internet, you should create a strong + password with a minimum of 16 characters. + (unsupported with GKE >= 1.19). 
+ returned: success + type: str + clientCertificateConfig: + description: + - Configuration for client certificate authentication on the cluster. For clusters + before v1.12, if no configuration is specified, a client certificate is issued. + returned: success + type: complex + contains: + issueClientCertificate: + description: + - Issue a client certificate. + returned: success + type: bool + clusterCaCertificate: + description: + - Base64-encoded public certificate that is the root of trust for the cluster. + returned: success + type: str + clientCertificate: + description: + - Base64-encoded public certificate used by clients to authenticate to the cluster + endpoint. + returned: success + type: str + clientKey: + description: + - Base64-encoded private key used by clients to authenticate to the cluster + endpoint. + returned: success + type: str +loggingService: + description: + - 'The logging service the cluster should use to write logs. Currently available + options: logging.googleapis.com - the Google Cloud Logging service.' + - none - no logs will be exported from the cluster. + - if left as an empty string,logging.googleapis.com will be used. + returned: success + type: str +monitoringService: + description: + - The monitoring service the cluster should use to write metrics. + - 'Currently available options: monitoring.googleapis.com - the Google Cloud Monitoring + service.' + - none - no metrics will be exported from the cluster. + - if left as an empty string, monitoring.googleapis.com will be used. + returned: success + type: str +network: + description: + - The name of the Google Compute Engine network to which the cluster is connected. + If left unspecified, the default network will be used. + returned: success + type: str +databaseEncryption: + description: + - Configuration of etcd encryption. + returned: success + type: complex + contains: + state: + description: + - Denotes the state of etcd encryption. 
+ returned: success + type: str + keyName: + description: + - Name of CloudKMS key to use for the encryption of secrets in etcd. Ex. + - "`projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key` + ." + returned: success + type: str +privateClusterConfig: + description: + - Configuration for a private cluster. + returned: success + type: complex + contains: + enablePrivateNodes: + description: + - Whether nodes have internal IP addresses only. If enabled, all nodes are given + only RFC 1918 private addresses and communicate with the master via private + networking. + returned: success + type: bool + enablePrivateEndpoint: + description: + - Whether the master's internal IP address is used as the cluster endpoint. + returned: success + type: bool + masterIpv4CidrBlock: + description: + - The IP range in CIDR notation to use for the hosted master network. This range + will be used for assigning internal IP addresses to the master or set of masters, + as well as the ILB VIP. This range must not overlap with any other ranges + in use within the cluster's network. + returned: success + type: str + privateEndpoint: + description: + - The internal IP address of this cluster's master endpoint. + returned: success + type: str + publicEndpoint: + description: + - The external IP address of this cluster's master endpoint. + returned: success + type: str +clusterIpv4Cidr: + description: + - The IP address range of the container pods in this cluster, in CIDR notation (e.g. + 10.96.0.0/14). Leave blank to have one automatically chosen or specify a /14 block + in 10.0.0.0/8. + returned: success + type: str +enableTpu: + description: + - "(Optional) Whether to enable Cloud TPU resources in this cluster." + - See the official documentation - U(https://cloud.google.com/tpu/docs/kubernetes-engine-setup) + . 
+ returned: success + type: bool +tpuIpv4CidrBlock: + description: + - The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + notation (e.g. `1.2.3.4/29`). + returned: success + type: str +addonsConfig: + description: + - Configurations for the various addons available to run in the cluster. + returned: success + type: complex + contains: + httpLoadBalancing: + description: + - Configuration for the HTTP (L7) load balancing controller addon, which makes + it easy to set up HTTP load balancers for services in a cluster. + returned: success + type: complex + contains: + disabled: + description: + - Whether the HTTP Load Balancing controller is enabled in the cluster. + When enabled, it runs a small pod in the cluster that manages the load + balancers. + returned: success + type: bool + horizontalPodAutoscaling: + description: + - Configuration for the horizontal pod autoscaling feature, which increases + or decreases the number of replica pods a replication controller has based + on the resource usage of the existing pods. + returned: success + type: complex + contains: + disabled: + description: + - Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. + When enabled, it ensures that a Heapster pod is running in the cluster, + which is also used by the Cloud Monitoring service. + returned: success + type: bool + networkPolicyConfig: + description: + - Configuration for NetworkPolicy. This only tracks whether the addon is enabled + or not on the Master, it does not track whether network policy is enabled + for the nodes. + returned: success + type: complex + contains: + disabled: + description: + - Whether NetworkPolicy is enabled for this cluster. + returned: success + type: bool +subnetwork: + description: + - The name of the Google Compute Engine subnetwork to which the cluster is connected. 
+ returned: success + type: str +locations: + description: + - The list of Google Compute Engine zones in which the cluster's nodes should be + located. + returned: success + type: list +resourceLabels: + description: + - The resource labels for the cluster to use to annotate any related Google Compute + Engine resources. + returned: success + type: dict +labelFingerprint: + description: + - The fingerprint of the set of labels for this cluster. + returned: success + type: str +legacyAbac: + description: + - Configuration for the legacy ABAC authorization mode. + returned: success + type: complex + contains: + enabled: + description: + - Whether the ABAC authorizer is enabled for this cluster. When enabled, identities + in the system, including service accounts, nodes, and controllers, will have + statically granted permissions beyond those provided by the RBAC configuration + or IAM. + returned: success + type: bool +networkPolicy: + description: + - Configuration options for the NetworkPolicy feature. + returned: success + type: complex + contains: + provider: + description: + - The selected network policy provider. + returned: success + type: str + enabled: + description: + - Whether network policy is enabled on the cluster. + returned: success + type: bool +defaultMaxPodsConstraint: + description: + - The default constraint on the maximum number of pods that can be run simultaneously + on a node in the node pool of this cluster. + - Only honored if cluster created with IP Alias support. + returned: success + type: complex + contains: + maxPodsPerNode: + description: + - Constraint enforced on the max num of pods per node. + returned: success + type: str +ipAllocationPolicy: + description: + - Configuration for controlling how IPs are allocated in the cluster. + returned: success + type: complex + contains: + useIpAliases: + description: + - Whether alias IPs will be used for pod IPs in the cluster. 
+ returned: success + type: bool + createSubnetwork: + description: + - Whether a new subnetwork will be created automatically for the cluster. + returned: success + type: bool + subnetworkName: + description: + - A custom subnetwork name to be used if createSubnetwork is true. + - If this field is empty, then an automatic name will be chosen for the new + subnetwork. + returned: success + type: str + clusterSecondaryRangeName: + description: + - The name of the secondary range to be used for the cluster CIDR block. The + secondary range will be used for pod IP addresses. + - This must be an existing secondary range associated with the cluster subnetwork + . + returned: success + type: str + servicesSecondaryRangeName: + description: + - The name of the secondary range to be used as for the services CIDR block. + The secondary range will be used for service ClusterIPs. This must be an existing + secondary range associated with the cluster subnetwork. + returned: success + type: str + clusterIpv4CidrBlock: + description: + - The IP address range for the cluster pod IPs. If this field is set, then cluster.cluster_ipv4_cidr + must be left blank. + - This field is only applicable when useIpAliases is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + nodeIpv4CidrBlock: + description: + - The IP address range of the instance IPs in this cluster. + - This is applicable only if createSubnetwork is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + servicesIpv4CidrBlock: + description: + - The IP address range of the services IPs in this cluster. If blank, a range + will be automatically chosen with the default size. + - This field is only applicable when useIpAliases is true. 
+ - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + tpuIpv4CidrBlock: + description: + - The IP address range of the Cloud TPUs in this cluster. If unspecified, a + range will be automatically chosen with the default size. + - This field is only applicable when useIpAliases is true. + - If unspecified, the range will use the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str +endpoint: + description: + - The IP address of this cluster's master endpoint. + - The endpoint can be accessed from the internet at https://username:password@endpoint/ + See the masterAuth property of this resource for username and password information. + returned: success + type: str +initialClusterVersion: + description: + - The software version of the master endpoint and kubelets used in the cluster when + it was first created. The version can be upgraded over time. + returned: success + type: str +currentMasterVersion: + description: + - The current software version of the master endpoint. + returned: success + type: str +currentNodeVersion: + description: + - The current version of the node software components. If they are currently at + multiple versions because they're in the process of being upgraded, this reflects + the minimum version of all nodes. + returned: success + type: str +createTime: + description: + - The time the cluster was created, in RFC3339 text format. + returned: success + type: str +status: + description: + - The current status of this cluster. + returned: success + type: str +statusMessage: + description: + - Additional information about the current status of this cluster, if available. + returned: success + type: str +nodeIpv4CidrSize: + description: + - The size of the address space on each node for hosting containers. 
+ - This is provisioned from within the container_ipv4_cidr range. + returned: success + type: int +servicesIpv4Cidr: + description: + - The IP address range of the Kubernetes services in this cluster, in CIDR notation + (e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from the + container CIDR. + returned: success + type: str +currentNodeCount: + description: + - The number of nodes currently in the cluster. + returned: success + type: int +expireTime: + description: + - The time the cluster will be automatically deleted in RFC3339 text format. + returned: success + type: str +conditions: + description: + - Which conditions caused the current cluster state. + returned: success + type: complex + contains: + code: + description: + - Machine-friendly representation of the condition. + returned: success + type: str + message: + description: + - Human-friendly representation of the condition. + returned: success + type: str +masterAuthorizedNetworksConfig: + description: + - Configuration for controlling how IPs are allocated in the cluster. + returned: success + type: complex + contains: + enabled: + description: + - Whether or not master authorized networks is enabled. + returned: success + type: bool + cidrBlocks: + description: + - Define up to 50 external networks that could access Kubernetes master through + HTTPS. + returned: success + type: complex + contains: + displayName: + description: + - Optional field used to identify cidr blocks. + returned: success + type: str + cidrBlock: + description: + - Block specified in CIDR notation. + returned: success + type: str +nodePools: + description: + - Node pools belonging to this cluster. + returned: success + type: complex + contains: + name: + description: + - Name of the node pool. + returned: success + type: str +binaryAuthorization: + description: + - Configuration for the BinaryAuthorization feature. 
+ returned: success + type: complex + contains: + enabled: + description: + - If enabled, all container images will be validated by Binary Authorization. + returned: success + type: bool +releaseChannel: + description: + - ReleaseChannel indicates which release channel a cluster is subscribed to. + - Release channels are arranged in order of risk and frequency of updates. + returned: success + type: complex + contains: + channel: + description: + - Which release channel the cluster is subscribed to. + returned: success + type: str +shieldedNodes: + description: + - Shielded Nodes configuration. + returned: success + type: complex + contains: + enabled: + description: + - Whether Shielded Nodes features are enabled on all nodes in this cluster. + returned: success + type: bool +networkConfig: + description: + - Network configurations . + returned: success + type: complex + contains: + enableIntraNodeVisibility: + description: + - Whether Intra-node visibility is enabled for this cluster. This makes same + node pod to pod traffic visible for VPC network. + returned: success + type: bool + network: + description: + - The relative name of the Google Compute Engine network to which the cluster + is connected. + - 'Example: projects/my-project/global/networks/my-network .' + returned: success + type: str + subnetwork: + description: + - The relative name of the Google Compute Engine subnetwork to which the cluster + is connected. + - 'Example: projects/my-project/regions/us-central1/subnetworks/my-subnet .' + returned: success + type: str + defaultSnatStatus: + description: + - Whether the cluster disables default in-node sNAT rules. In-node sNAT rules + will be disabled when defaultSnatStatus is disabled. + returned: success + type: bool +enableKubernetesAlpha: + description: + - Kubernetes alpha features are enabled on this cluster. This includes alpha API + groups (e.g. 
    v1alpha1) and features that may not be production ready in the kubernetes
    version of the master and nodes.
  returned: success
  type: bool
location:
  description:
  - The location where the cluster is deployed.
  returned: success
  type: str
kubectlPath:
  description:
  - The path that the kubectl config file will be written to.
  - The file will not be created if this path is unset.
  - Any existing file at this path will be completely overwritten.
  - This requires the PyYaml library.
  returned: success
  type: str
kubectlContext:
  description:
  - The name of the context for the kubectl config file. Will default to the cluster
    name.
  returned: success
  type: str
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
    navigate_hash,
    GcpSession,
    GcpModule,
    GcpRequest,
    remove_nones_from_dict,
    replace_resource_dict,
)
import json
import time

################################################################################
# Main
################################################################################


def main():
    """Main function"""

    # Argument spec mirrors the GKE v1 clusters resource, with Ansible-style
    # snake_case option names (converted to camelCase by the Cluster* helpers).
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(type='str'),
            description=dict(type='str'),
            initial_node_count=dict(type='int'),
            node_config=dict(
                type='dict',
                options=dict(
                    machine_type=dict(type='str'),
                    disk_size_gb=dict(type='int'),
                    oauth_scopes=dict(type='list', elements='str'),
                    service_account=dict(type='str'),
                    metadata=dict(type='dict'),
                    image_type=dict(type='str'),
                    labels=dict(type='dict'),
                    local_ssd_count=dict(type='int'),
                    tags=dict(type='list', elements='str'),
                    preemptible=dict(type='bool'),
                    # accelerator_count is a string per the API's int64 encoding.
                    accelerators=dict(type='list', elements='dict', options=dict(accelerator_count=dict(type='str'), accelerator_type=dict(type='str'))),
                    disk_type=dict(type='str'),
                    min_cpu_platform=dict(type='str'),
                    taints=dict(type='list', elements='dict', options=dict(key=dict(type='str'), value=dict(type='str'), effect=dict(type='str'))),
                    shielded_instance_config=dict(
                        type='dict', options=dict(enable_secure_boot=dict(type='bool'), enable_integrity_monitoring=dict(type='bool'))
                    ),
                ),
            ),
            master_auth=dict(
                type='dict',
                options=dict(
                    username=dict(type='str'),
                    password=dict(type='str'),
                    client_certificate_config=dict(type='dict', options=dict(issue_client_certificate=dict(type='bool'))),
                ),
            ),
            logging_service=dict(type='str'),
            monitoring_service=dict(type='str'),
            network=dict(type='str'),
            database_encryption=dict(type='dict', options=dict(state=dict(type='str'), key_name=dict(type='str'))),
            private_cluster_config=dict(
                type='dict',
                options=dict(enable_private_nodes=dict(type='bool'), enable_private_endpoint=dict(type='bool'), master_ipv4_cidr_block=dict(type='str')),
            ),
            cluster_ipv4_cidr=dict(type='str'),
            enable_tpu=dict(type='bool'),
            addons_config=dict(
                type='dict',
                options=dict(
                    http_load_balancing=dict(type='dict', options=dict(disabled=dict(type='bool'))),
                    horizontal_pod_autoscaling=dict(type='dict', options=dict(disabled=dict(type='bool'))),
                    network_policy_config=dict(type='dict', options=dict(disabled=dict(type='bool'))),
                ),
            ),
            subnetwork=dict(type='str'),
            locations=dict(type='list', elements='str', aliases=['nodeLocations']),
            resource_labels=dict(type='dict'),
            legacy_abac=dict(type='dict', options=dict(enabled=dict(type='bool'))),
            network_policy=dict(type='dict', options=dict(provider=dict(type='str'), enabled=dict(type='bool'))),
            default_max_pods_constraint=dict(type='dict', options=dict(max_pods_per_node=dict(type='str'))),
            ip_allocation_policy=dict(
                type='dict',
                options=dict(
                    use_ip_aliases=dict(type='bool'),
                    create_subnetwork=dict(type='bool'),
                    subnetwork_name=dict(type='str'),
                    cluster_secondary_range_name=dict(type='str'),
                    services_secondary_range_name=dict(type='str'),
                    cluster_ipv4_cidr_block=dict(type='str'),
                    node_ipv4_cidr_block=dict(type='str'),
                    services_ipv4_cidr_block=dict(type='str'),
                    tpu_ipv4_cidr_block=dict(type='str'),
                ),
            ),
            initial_cluster_version=dict(type='str'),
            master_authorized_networks_config=dict(
                type='dict',
                options=dict(
                    enabled=dict(type='bool'),
                    cidr_blocks=dict(type='list', elements='dict', options=dict(display_name=dict(type='str'), cidr_block=dict(type='str'))),
                ),
            ),
            binary_authorization=dict(type='dict', options=dict(enabled=dict(type='bool'))),
            release_channel=dict(type='dict', options=dict(channel=dict(type='str'))),
            shielded_nodes=dict(type='dict', options=dict(enabled=dict(type='bool'))),
            network_config=dict(type='dict', options=dict(enable_intra_node_visibility=dict(type='bool'), default_snat_status=dict(type='bool'))),
            enable_kubernetes_alpha=dict(type='bool'),
            location=dict(required=True, type='str', aliases=['zone']),
            kubectl_path=dict(type='str'),
            kubectl_context=dict(type='str'),
        )
    )

    # Default to the broad cloud-platform scope when the user supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    state = module.params['state']

    # Standard declarative flow: fetch current state, then create/update/delete
    # as needed to converge on the requested state.
    fetch = fetch_resource(module, self_link(module))
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module))
                fetch = fetch_resource(module, self_link(module))
                changed = True
        else:
            delete(module, self_link(module))
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module))
            # The auto-created default node pool is removed after creation.
            delete_default_node_pool(module)
            changed = True
        else:
            fetch = {}

    # Optionally write a kubeconfig for the (possibly just-created) cluster.
    if module.params.get('kubectl_path'):
        Kubectl(module).write_file()
    fetch.update({'changed': changed})

    module.exit_json(**fetch)
def create(module, link):
    """POST the cluster definition and wait for the operation to finish."""
    session = GcpSession(module, 'container')
    return wait_for_operation(module, session.post(link, resource_to_request(module)))


def update(module, link):
    """PUT the cluster definition and wait for the operation to finish."""
    session = GcpSession(module, 'container')
    return wait_for_operation(module, session.put(link, resource_to_request(module)))


def delete(module, link):
    """DELETE the cluster and wait for the operation to finish."""
    session = GcpSession(module, 'container')
    return wait_for_operation(module, session.delete(link))


def resource_to_request(module):
    """Build the API request body from module params.

    Nested blocks are converted to camelCase by the Cluster* serializers;
    empty values are dropped afterwards, with explicit False kept because it
    is a meaningful boolean.
    """
    params = module.params
    request = {
        'name': params.get('name'),
        'description': params.get('description'),
        'initialNodeCount': params.get('initial_node_count'),
        'nodeConfig': ClusterNodeconfig(params.get('node_config', {}), module).to_request(),
        'masterAuth': ClusterMasterauth(params.get('master_auth', {}), module).to_request(),
        'loggingService': params.get('logging_service'),
        'monitoringService': params.get('monitoring_service'),
        'network': params.get('network'),
        'databaseEncryption': ClusterDatabaseencryption(params.get('database_encryption', {}), module).to_request(),
        'privateClusterConfig': ClusterPrivateclusterconfig(params.get('private_cluster_config', {}), module).to_request(),
        'clusterIpv4Cidr': params.get('cluster_ipv4_cidr'),
        'enableTpu': params.get('enable_tpu'),
        'addonsConfig': ClusterAddonsconfig(params.get('addons_config', {}), module).to_request(),
        'subnetwork': params.get('subnetwork'),
        'locations': params.get('locations'),
        'resourceLabels': params.get('resource_labels'),
        'legacyAbac': ClusterLegacyabac(params.get('legacy_abac', {}), module).to_request(),
        'networkPolicy': ClusterNetworkpolicy(params.get('network_policy', {}), module).to_request(),
        'defaultMaxPodsConstraint': ClusterDefaultmaxpodsconstraint(params.get('default_max_pods_constraint', {}), module).to_request(),
        'ipAllocationPolicy': ClusterIpallocationpolicy(params.get('ip_allocation_policy', {}), module).to_request(),
        'initialClusterVersion': params.get('initial_cluster_version'),
        'masterAuthorizedNetworksConfig': ClusterMasterauthorizednetworksconfig(params.get('master_authorized_networks_config', {}), module).to_request(),
        'binaryAuthorization': ClusterBinaryauthorization(params.get('binary_authorization', {}), module).to_request(),
        'releaseChannel': ClusterReleasechannel(params.get('release_channel', {}), module).to_request(),
        'shieldedNodes': ClusterShieldednodes(params.get('shielded_nodes', {}), module).to_request(),
        'networkConfig': ClusterNetworkconfig(params.get('network_config', {}), module).to_request(),
        'enableKubernetesAlpha': params.get('enable_kubernetes_alpha'),
    }
    request = encode_request(request, module)
    # Keep truthy values plus explicit False booleans.
    return {k: v for k, v in request.items() if v or v is False}


def fetch_resource(module, link, allow_not_found=True):
    """GET the resource at *link*; None when absent and that is allowed."""
    session = GcpSession(module, 'container')
    return return_if_object(module, session.get(link), allow_not_found)


def self_link(module):
    """Canonical URL of this cluster."""
    return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters/{name}".format(**module.params)


def collection(module):
    """URL of the clusters collection for this project/location."""
    return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters".format(**module.params)


def return_if_object(module, response, allow_not_found=False):
    """Decode *response* into a dict.

    Returns None for 204 No Content, and for 404 when allow_not_found is set;
    fails the module on HTTP errors, undecodable bodies, or embedded errors.
    """
    status = response.status_code
    if status == 204 or (allow_not_found and status == 404):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
def is_different(module, response):
    # Compare the desired state (built from params) with the current state
    # (normalized API response), restricted to keys both sides share.
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    # NOTE(review): initialNodeCount and nodeConfig are taken from module
    # params rather than from the API response — presumably because those
    # values no longer match after the default node pool is deleted; confirm
    # before relying on change detection for these two fields.
    return {
        u'name': response.get(u'name'),
        u'description': response.get(u'description'),
        u'initialNodeCount': module.params.get('initial_node_count'),
        u'nodeConfig': ClusterNodeconfig(module.params.get('node_config', {}), module).to_request(),
        u'masterAuth': ClusterMasterauth(response.get(u'masterAuth', {}), module).from_response(),
        u'loggingService': response.get(u'loggingService'),
        u'monitoringService': response.get(u'monitoringService'),
        u'network': response.get(u'network'),
        u'databaseEncryption': ClusterDatabaseencryption(response.get(u'databaseEncryption', {}), module).from_response(),
        u'privateClusterConfig': ClusterPrivateclusterconfig(response.get(u'privateClusterConfig', {}), module).from_response(),
        u'clusterIpv4Cidr': response.get(u'clusterIpv4Cidr'),
        u'enableTpu': response.get(u'enableTpu'),
        u'tpuIpv4CidrBlock': response.get(u'tpuIpv4CidrBlock'),
        u'addonsConfig': ClusterAddonsconfig(response.get(u'addonsConfig', {}), module).from_response(),
        u'subnetwork': response.get(u'subnetwork'),
        u'locations': response.get(u'locations'),
        u'resourceLabels': response.get(u'resourceLabels'),
        u'labelFingerprint': response.get(u'labelFingerprint'),
        u'legacyAbac': ClusterLegacyabac(response.get(u'legacyAbac', {}), module).from_response(),
        u'networkPolicy': ClusterNetworkpolicy(response.get(u'networkPolicy', {}), module).from_response(),
        u'defaultMaxPodsConstraint': ClusterDefaultmaxpodsconstraint(response.get(u'defaultMaxPodsConstraint', {}), module).from_response(),
        u'ipAllocationPolicy': ClusterIpallocationpolicy(response.get(u'ipAllocationPolicy', {}), module).from_response(),
        u'endpoint': response.get(u'endpoint'),
        u'initialClusterVersion': response.get(u'initialClusterVersion'),
        u'currentMasterVersion': response.get(u'currentMasterVersion'),
        u'currentNodeVersion': response.get(u'currentNodeVersion'),
        u'createTime': response.get(u'createTime'),
        u'status': response.get(u'status'),
        u'statusMessage': response.get(u'statusMessage'),
        u'nodeIpv4CidrSize': response.get(u'nodeIpv4CidrSize'),
        u'servicesIpv4Cidr': response.get(u'servicesIpv4Cidr'),
        u'currentNodeCount': response.get(u'currentNodeCount'),
        u'expireTime': response.get(u'expireTime'),
        u'conditions': ClusterConditionsArray(response.get(u'conditions', []), module).from_response(),
        u'masterAuthorizedNetworksConfig': ClusterMasterauthorizednetworksconfig(response.get(u'masterAuthorizedNetworksConfig', {}), module).from_response(),
        u'nodePools': ClusterNodepoolsArray(response.get(u'nodePools', []), module).from_response(),
        u'binaryAuthorization': ClusterBinaryauthorization(response.get(u'binaryAuthorization', {}), module).from_response(),
        u'releaseChannel': ClusterReleasechannel(response.get(u'releaseChannel', {}), module).from_response(),
        u'shieldedNodes': ClusterShieldednodes(response.get(u'shieldedNodes', {}), module).from_response(),
        u'networkConfig': ClusterNetworkconfig(response.get(u'networkConfig', {}), module).from_response(),
        u'enableKubernetesAlpha': response.get(u'enableKubernetesAlpha'),
    }
def async_op_url(module, extra_data=None):
    """URL of a long-running operation; module params override *extra_data*."""
    merged = dict(extra_data or {})
    merged.update(module.params)
    return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/operations/{op_id}".format(**merged)


def wait_for_operation(module, response):
    """Block until the operation described by *response* finishes, then
    return the finished operation's target resource ({} if no operation)."""
    operation = return_if_object(module, response)
    if operation is None:
        return {}
    completed = wait_for_completion(navigate_hash(operation, ['status']), operation, module)
    return fetch_resource(module, navigate_hash(completed, ['targetLink']))


def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until its status is DONE.

    Fails the module if the operation reports errors along the way.
    """
    op_uri = async_op_url(module, {'op_id': navigate_hash(op_result, ['name'])})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module when the operation payload carries errors."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


# The Container Engine create API nests the resource under a 'cluster' key:
#   { 'cluster': { ... cluster data ... } }
def encode_request(resource_request, module):
    """Wrap the request body in the envelope the create API expects."""
    return {'cluster': resource_request}
# Deletes the default node pool on default creation.
def delete_default_node_pool(module):
    """Delete the auto-created 'default-pool' node pool and wait for the
    delete operation to finish."""
    auth = GcpSession(module, 'container')
    link = "https://container.googleapis.com/v1/projects/%s/locations/%s/clusters/%s/nodePools/default-pool" % (
        module.params['project'],
        module.params['location'],
        module.params['name'],
    )
    return wait_for_operation(module, auth.delete(link))


class Kubectl(object):
    """Renders a kubectl config (kubeconfig) file for the managed cluster."""

    def __init__(self, module):
        self.module = module

    def write_file(self):
        """Write the kubeconfig to module.params['kubectl_path'].

        kubectl_path must be set or this will fail; requires PyYAML.
        """
        try:
            import yaml
        except ImportError:
            self.module.fail_json(msg="Please install the pyyaml module")

        with open(self.module.params['kubectl_path'], 'w') as f:
            f.write(yaml.dump(self._contents()))

    def _contents(self):
        """Return the kubeconfig document as a dict."""
        # _auth_token() also populates self.fetch with the cluster resource.
        token = self._auth_token()
        endpoint = "https://%s" % self.fetch["endpoint"]
        context = self.module.params.get('kubectl_context')
        if not context:
            context = self.module.params['name']

        user = {
            'name': context,
            'user': {
                'auth-provider': {
                    'config': {
                        'access-token': token,
                        'cmd-args': 'config config-helper --format=json',
                        'cmd-path': '/usr/lib64/google-cloud-sdk/bin/gcloud',
                        'expiry-key': '{.credential.token_expiry}',
                        'token-key': '{.credential.access_token}',
                    },
                    'name': 'gcp',
                },
            },
        }

        auth_keyword = self.fetch['masterAuth'].keys()
        if 'username' in auth_keyword and 'password' in auth_keyword:
            # NOTE(review): these land under auth-provider rather than directly
            # under 'user'; kubeconfig basic auth normally expects
            # user.username / user.password — confirm against consumers.
            user['user']['auth-provider'].update({
                'username': str(self.fetch['masterAuth']['username']),
                'password': str(self.fetch['masterAuth']['password']),
            })

        return {
            'apiVersion': 'v1',
            'clusters': [
                {
                    'name': context,
                    'cluster': {
                        'certificate-authority-data': str(self.fetch['masterAuth']['clusterCaCertificate']),
                        # Fix: 'endpoint' was computed but never emitted, so the
                        # generated kubeconfig lacked the required cluster
                        # 'server' field and was unusable by kubectl.
                        'server': endpoint,
                    },
                }
            ],
            'contexts': [{'name': context, 'context': {'cluster': context, 'user': context}}],
            'current-context': context,
            'kind': 'Config',
            'preferences': {},
            'users': [user],
        }

    def _auth_token(self):
        """Fetch the cluster (cached on self.fetch) and return the OAuth
        access token taken from the request's Authorization header."""
        auth = GcpSession(self.module, 'auth')
        response = auth.get(self_link(self.module))
        self.fetch = response.json()
        return response.request.headers['authorization'].split(' ')[1]


class ClusterNodeconfig(object):
    """Serializer for the node_config block: Ansible snake_case options to
    API camelCase (to_request) and back (from_response); None values are
    dropped via remove_nones_from_dict."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'machineType': self.request.get('machine_type'),
                u'diskSizeGb': self.request.get('disk_size_gb'),
                u'oauthScopes': self.request.get('oauth_scopes'),
                u'serviceAccount': self.request.get('service_account'),
                u'metadata': self.request.get('metadata'),
                u'imageType': self.request.get('image_type'),
                u'labels': self.request.get('labels'),
                u'localSsdCount': self.request.get('local_ssd_count'),
                u'tags': self.request.get('tags'),
                u'preemptible': self.request.get('preemptible'),
                u'accelerators': ClusterAcceleratorsArray(self.request.get('accelerators', []), self.module).to_request(),
                u'diskType': self.request.get('disk_type'),
                u'minCpuPlatform': self.request.get('min_cpu_platform'),
                u'taints': ClusterTaintsArray(self.request.get('taints', []), self.module).to_request(),
                u'shieldedInstanceConfig': ClusterShieldedinstanceconfig(self.request.get('shielded_instance_config', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'machineType': self.request.get(u'machineType'),
                u'diskSizeGb': self.request.get(u'diskSizeGb'),
                u'oauthScopes': self.request.get(u'oauthScopes'),
                u'serviceAccount': self.request.get(u'serviceAccount'),
                u'metadata': self.request.get(u'metadata'),
                u'imageType': self.request.get(u'imageType'),
                u'labels': self.request.get(u'labels'),
                u'localSsdCount': self.request.get(u'localSsdCount'),
                u'tags': self.request.get(u'tags'),
                u'preemptible': self.request.get(u'preemptible'),
                u'accelerators': ClusterAcceleratorsArray(self.request.get(u'accelerators', []), self.module).from_response(),
                u'diskType': self.request.get(u'diskType'),
                u'minCpuPlatform': self.request.get(u'minCpuPlatform'),
                u'taints': ClusterTaintsArray(self.request.get(u'taints', []), self.module).from_response(),
                u'shieldedInstanceConfig': ClusterShieldedinstanceconfig(self.request.get(u'shieldedInstanceConfig', {}), self.module).from_response(),
            }
        )
class ClusterAcceleratorsArray(object):
    """Serializer for the node_config.accelerators list (snake_case <-> API
    camelCase per item; None values dropped)."""

    def __init__(self, request, module):
        self.module = module
        self.request = request if request else []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'acceleratorCount': item.get('accelerator_count'), u'acceleratorType': item.get('accelerator_type')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'acceleratorCount': item.get(u'acceleratorCount'), u'acceleratorType': item.get(u'acceleratorType')})


class ClusterTaintsArray(object):
    """Serializer for the node_config.taints list (key/value/effect triples;
    None values dropped)."""

    def __init__(self, request, module):
        self.module = module
        self.request = request if request else []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'key': item.get('key'), u'value': item.get('value'), u'effect': item.get('effect')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'key': item.get(u'key'), u'value': item.get(u'value'), u'effect': item.get(u'effect')})
class ClusterShieldedinstanceconfig(object):
    """Serializer for node_config.shielded_instance_config (snake_case <->
    API camelCase; None values dropped via remove_nones_from_dict)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {u'enableSecureBoot': self.request.get('enable_secure_boot'), u'enableIntegrityMonitoring': self.request.get('enable_integrity_monitoring')}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {u'enableSecureBoot': self.request.get(u'enableSecureBoot'), u'enableIntegrityMonitoring': self.request.get(u'enableIntegrityMonitoring')}
        )


class ClusterMasterauth(object):
    """Serializer for the master_auth block, including the nested client
    certificate configuration."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'username': self.request.get('username'),
                u'password': self.request.get('password'),
                u'clientCertificateConfig': ClusterClientcertificateconfig(self.request.get('client_certificate_config', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'username': self.request.get(u'username'),
                u'password': self.request.get(u'password'),
                u'clientCertificateConfig': ClusterClientcertificateconfig(self.request.get(u'clientCertificateConfig', {}), self.module).from_response(),
            }
        )


class ClusterClientcertificateconfig(object):
    """Serializer for master_auth.client_certificate_config."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'issueClientCertificate': self.request.get('issue_client_certificate')})

    def from_response(self):
        return remove_nones_from_dict({u'issueClientCertificate': self.request.get(u'issueClientCertificate')})


class ClusterDatabaseencryption(object):
    """Serializer for the database_encryption block (state / key_name)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'state': self.request.get('state'), u'keyName': self.request.get('key_name')})

    def from_response(self):
        return remove_nones_from_dict({u'state': self.request.get(u'state'), u'keyName': self.request.get(u'keyName')})
class ClusterPrivateclusterconfig(object):
    """Serializer for the private_cluster_config block."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'enablePrivateNodes': self.request.get('enable_private_nodes'),
                u'enablePrivateEndpoint': self.request.get('enable_private_endpoint'),
                u'masterIpv4CidrBlock': self.request.get('master_ipv4_cidr_block'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'enablePrivateNodes': self.request.get(u'enablePrivateNodes'),
                u'enablePrivateEndpoint': self.request.get(u'enablePrivateEndpoint'),
                u'masterIpv4CidrBlock': self.request.get(u'masterIpv4CidrBlock'),
            }
        )


class ClusterAddonsconfig(object):
    """Serializer for the addons_config block; delegates each addon to its
    own serializer."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'httpLoadBalancing': ClusterHttploadbalancing(self.request.get('http_load_balancing', {}), self.module).to_request(),
                u'horizontalPodAutoscaling': ClusterHorizontalpodautoscaling(self.request.get('horizontal_pod_autoscaling', {}), self.module).to_request(),
                u'networkPolicyConfig': ClusterNetworkpolicyconfig(self.request.get('network_policy_config', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'httpLoadBalancing': ClusterHttploadbalancing(self.request.get(u'httpLoadBalancing', {}), self.module).from_response(),
                u'horizontalPodAutoscaling': ClusterHorizontalpodautoscaling(self.request.get(u'horizontalPodAutoscaling', {}), self.module).from_response(),
                u'networkPolicyConfig': ClusterNetworkpolicyconfig(self.request.get(u'networkPolicyConfig', {}), self.module).from_response(),
            }
        )


class ClusterHttploadbalancing(object):
    """Serializer for addons_config.http_load_balancing ('disabled' flag)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'disabled': self.request.get('disabled')})

    def from_response(self):
        return remove_nones_from_dict({u'disabled': self.request.get(u'disabled')})


class ClusterHorizontalpodautoscaling(object):
    """Serializer for addons_config.horizontal_pod_autoscaling ('disabled' flag)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'disabled': self.request.get('disabled')})

    def from_response(self):
        return remove_nones_from_dict({u'disabled': self.request.get(u'disabled')})


class ClusterNetworkpolicyconfig(object):
    """Serializer for addons_config.network_policy_config ('disabled' flag)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'disabled': self.request.get('disabled')})

    def from_response(self):
        return remove_nones_from_dict({u'disabled': self.request.get(u'disabled')})


class ClusterLegacyabac(object):
    """Serializer for the legacy_abac block ('enabled' flag)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'enabled': self.request.get('enabled')})

    def from_response(self):
        return remove_nones_from_dict({u'enabled': self.request.get(u'enabled')})


class ClusterNetworkpolicy(object):
    """Serializer for the network_policy block (provider / enabled)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'provider': self.request.get('provider'), u'enabled': self.request.get('enabled')})

    def from_response(self):
        return remove_nones_from_dict({u'provider': self.request.get(u'provider'), u'enabled': self.request.get(u'enabled')})
class ClusterDefaultmaxpodsconstraint(object):
    """Serializer for the default_max_pods_constraint block."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'maxPodsPerNode': self.request.get('max_pods_per_node')})

    def from_response(self):
        return remove_nones_from_dict({u'maxPodsPerNode': self.request.get(u'maxPodsPerNode')})


class ClusterIpallocationpolicy(object):
    """Serializer for the ip_allocation_policy block (VPC-native / IP alias
    settings)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'useIpAliases': self.request.get('use_ip_aliases'),
                u'createSubnetwork': self.request.get('create_subnetwork'),
                u'subnetworkName': self.request.get('subnetwork_name'),
                u'clusterSecondaryRangeName': self.request.get('cluster_secondary_range_name'),
                u'servicesSecondaryRangeName': self.request.get('services_secondary_range_name'),
                u'clusterIpv4CidrBlock': self.request.get('cluster_ipv4_cidr_block'),
                u'nodeIpv4CidrBlock': self.request.get('node_ipv4_cidr_block'),
                u'servicesIpv4CidrBlock': self.request.get('services_ipv4_cidr_block'),
                u'tpuIpv4CidrBlock': self.request.get('tpu_ipv4_cidr_block'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'useIpAliases': self.request.get(u'useIpAliases'),
                u'createSubnetwork': self.request.get(u'createSubnetwork'),
                u'subnetworkName': self.request.get(u'subnetworkName'),
                u'clusterSecondaryRangeName': self.request.get(u'clusterSecondaryRangeName'),
                u'servicesSecondaryRangeName': self.request.get(u'servicesSecondaryRangeName'),
                u'clusterIpv4CidrBlock': self.request.get(u'clusterIpv4CidrBlock'),
                u'nodeIpv4CidrBlock': self.request.get(u'nodeIpv4CidrBlock'),
                u'servicesIpv4CidrBlock': self.request.get(u'servicesIpv4CidrBlock'),
                u'tpuIpv4CidrBlock': self.request.get(u'tpuIpv4CidrBlock'),
            }
        )


class ClusterConditionsArray(object):
    """Serializer for the (output-only) conditions list (code / message)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'code': item.get('code'), u'message': item.get('message')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'code': item.get(u'code'), u'message': item.get(u'message')})


class ClusterMasterauthorizednetworksconfig(object):
    """Serializer for the master_authorized_networks_config block, including
    its list of CIDR blocks."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {u'enabled': self.request.get('enabled'), u'cidrBlocks': ClusterCidrblocksArray(self.request.get('cidr_blocks', []), self.module).to_request()}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {u'enabled': self.request.get(u'enabled'), u'cidrBlocks': ClusterCidrblocksArray(self.request.get(u'cidrBlocks', []), self.module).from_response()}
        )


class ClusterCidrblocksArray(object):
    """Serializer for master_authorized_networks_config.cidr_blocks items
    (display_name / cidr_block)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'displayName': item.get('display_name'), u'cidrBlock': item.get('cidr_block')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'displayName': item.get(u'displayName'), u'cidrBlock': item.get(u'cidrBlock')})
u'cidrBlock': item.get('cidr_block')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'displayName': item.get(u'displayName'), u'cidrBlock': item.get(u'cidrBlock')}) + + +class ClusterNodepoolsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'name': item.get('name')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'name': item.get(u'name')}) + + +class ClusterBinaryauthorization(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'enabled': self.request.get('enabled')}) + + def from_response(self): + return remove_nones_from_dict({u'enabled': self.request.get(u'enabled')}) + + +class ClusterReleasechannel(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'channel': self.request.get('channel')}) + + def from_response(self): + return remove_nones_from_dict({u'channel': self.request.get(u'channel')}) + + +class ClusterShieldednodes(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'enabled': self.request.get('enabled')}) + + def from_response(self): + return remove_nones_from_dict({u'enabled': self.request.get(u'enabled')}) + + +class 
ClusterNetworkconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'enableIntraNodeVisibility': self.request.get('enable_intra_node_visibility'), u'defaultSnatStatus': self.request.get('default_snat_status')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'enableIntraNodeVisibility': self.request.get(u'enableIntraNodeVisibility'), u'defaultSnatStatus': self.request.get(u'defaultSnatStatus')} + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_container_cluster_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_container_cluster_info.py new file mode 100644 index 000000000..77d57793f --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_container_cluster_info.py @@ -0,0 +1,895 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_container_cluster_info +description: +- Gather info for GCP Cluster +short_description: Gather info for GCP Cluster +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + location: + description: + - The location where the cluster is deployed. + required: true + type: str + aliases: + - region + - zone + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. 
+ - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a cluster + gcp_container_cluster_info: + location: us-central1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of this cluster. The name must be unique within this project and + location, and can be up to 40 characters. Must be Lowercase letters, numbers, + and hyphens only. Must start with a letter. Must end with a number or a letter. + returned: success + type: str + description: + description: + - An optional description of this cluster. + returned: success + type: str + initialNodeCount: + description: + - The number of nodes to create in this cluster. You must ensure that your Compute + Engine resource quota is sufficient for this number of instances. You must + also have available firewall and routes quota. For requests, this field should + only be used in lieu of a "nodePool" object, since this configuration (along + with the "nodeConfig") will be used to create a "NodePool" object with an + auto-generated name. Do not use this and a nodePool at the same time. 
+ - This field has been deprecated. Please use nodePool.initial_node_count instead. + returned: success + type: int + nodeConfig: + description: + - Parameters used in creating the cluster's nodes. + - For requests, this field should only be used in lieu of a "nodePool" object, + since this configuration (along with the "initialNodeCount") will be used + to create a "NodePool" object with an auto-generated name. Do not use this + and a nodePool at the same time. For responses, this field will be populated + with the node configuration of the first node pool. If unspecified, the defaults + are used. + returned: success + type: complex + contains: + machineType: + description: + - The name of a Google Compute Engine machine type (e.g. + - n1-standard-1). If unspecified, the default machine type is n1-standard-1. + returned: success + type: str + diskSizeGb: + description: + - Size of the disk attached to each node, specified in GB. The smallest + allowed disk size is 10GB. If unspecified, the default disk size is 100GB. + returned: success + type: int + oauthScopes: + description: + - The set of Google API scopes to be made available on all of the node VMs + under the "default" service account. + - 'The following scopes are recommended, but not required, and by default + are not included: U(https://www.googleapis.com/auth/compute) is required + for mounting persistent storage on your nodes.' + - U(https://www.googleapis.com/auth/devstorage.read_only) is required for + communicating with gcr.io (the Google Container Registry). + - If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring + are enabled, in which case their required scopes will be added. + returned: success + type: list + serviceAccount: + description: + - The Google Cloud Platform Service Account to be used by the node VMs. + If no Service Account is specified, the "default" service account is used. 
+ returned: success + type: str + metadata: + description: + - The metadata key/value pairs assigned to instances in the cluster. + - 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + in length. These are reflected as part of a URL in the metadata server. + Additionally, to avoid ambiguity, keys must not conflict with any other + metadata keys for the project or be one of the four reserved keys: "instance-template", + "kube-env", "startup-script", and "user-data" Values are free-form strings, + and only have meaning as interpreted by the image running in the instance. + The only restriction placed on them is that each value''s size must be + less than or equal to 32 KB.' + - The total size of all keys and values must be less than 512 KB. + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + imageType: + description: + - The image type to use for this node. Note that for a given image type, + the latest version of it will be used. + returned: success + type: str + labels: + description: + - 'The map of Kubernetes labels (key/value pairs) to be applied to each + node. These will be added in addition to any default label(s) that Kubernetes + may apply to the node. In case of conflict in label keys, the applied + set may differ depending on the Kubernetes version -- it''s best to assume + the behavior is undefined and conflicts should be avoided. For more information, + including usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html) + An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + localSsdCount: + description: + - The number of local SSD disks to be attached to the node. + - 'The limit for this value is dependent upon the maximum number of disks + available on a machine per zone. 
See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits) + for more information.' + returned: success + type: int + tags: + description: + - The list of instance tags applied to all nodes. Tags are used to identify + valid sources or targets for network firewalls and are specified by the + client during cluster or node pool creation. Each tag within the list + must comply with RFC1035. + returned: success + type: list + preemptible: + description: + - 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible) + for more information about preemptible VM instances.' + returned: success + type: bool + accelerators: + description: + - A list of hardware accelerators to be attached to each node. See U(https://cloud.google.com/compute/docs/gpus) + for more information about support for GPUs. + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of accelerator cards exposed to an instance. + returned: success + type: str + acceleratorType: + description: + - The accelerator type resource name. + returned: success + type: str + diskType: + description: + - Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') + If unspecified, the default disk type is 'pd-standard' . + returned: success + type: str + minCpuPlatform: + description: + - Minimum CPU platform to be used by this instance. The instance may be + scheduled on the specified or newer CPU platform. + returned: success + type: str + taints: + description: + - List of kubernetes taints to be applied to each node. + - 'For more information, including usage and the valid values, see: U(https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) + .' + returned: success + type: complex + contains: + key: + description: + - Key for taint. + returned: success + type: str + value: + description: + - Value for taint. 
+ returned: success + type: str + effect: + description: + - Effect for taint. + returned: success + type: str + shieldedInstanceConfig: + description: + - Shielded Instance options. + returned: success + type: complex + contains: + enableSecureBoot: + description: + - Defines whether the instance has Secure Boot enabled. + - Secure Boot helps ensure that the system only runs authentic software + by verifying the digital signature of all boot components, and halting + the boot process if signature verification fails. + returned: success + type: bool + enableIntegrityMonitoring: + description: + - Defines whether the instance has integrity monitoring enabled. + - Enables monitoring and attestation of the boot integrity of the instance. + - The attestation is performed against the integrity policy baseline. + This baseline is initially derived from the implicitly trusted boot + image when the instance is created. + returned: success + type: bool + masterAuth: + description: + - The authentication information for accessing the master endpoint. + returned: success + type: complex + contains: + username: + description: + - The username to use for HTTP basic authentication to the master endpoint. + returned: success + type: str + password: + description: + - The password to use for HTTP basic authentication to the master endpoint. + Because the master endpoint is open to the Internet, you should create + a strong password with a minimum of 16 characters. + returned: success + type: str + clientCertificateConfig: + description: + - Configuration for client certificate authentication on the cluster. For + clusters before v1.12, if no configuration is specified, a client certificate + is issued. + returned: success + type: complex + contains: + issueClientCertificate: + description: + - Issue a client certificate. + returned: success + type: bool + clusterCaCertificate: + description: + - Base64-encoded public certificate that is the root of trust for the cluster. 
+ returned: success + type: str + clientCertificate: + description: + - Base64-encoded public certificate used by clients to authenticate to the + cluster endpoint. + returned: success + type: str + clientKey: + description: + - Base64-encoded private key used by clients to authenticate to the cluster + endpoint. + returned: success + type: str + loggingService: + description: + - 'The logging service the cluster should use to write logs. Currently available + options: logging.googleapis.com - the Google Cloud Logging service.' + - none - no logs will be exported from the cluster. + - if left as an empty string,logging.googleapis.com will be used. + returned: success + type: str + monitoringService: + description: + - The monitoring service the cluster should use to write metrics. + - 'Currently available options: monitoring.googleapis.com - the Google Cloud + Monitoring service.' + - none - no metrics will be exported from the cluster. + - if left as an empty string, monitoring.googleapis.com will be used. + returned: success + type: str + network: + description: + - The name of the Google Compute Engine network to which the cluster is connected. + If left unspecified, the default network will be used. + returned: success + type: str + databaseEncryption: + description: + - Configuration of etcd encryption. + returned: success + type: complex + contains: + state: + description: + - Denotes the state of etcd encryption. + returned: success + type: str + keyName: + description: + - Name of CloudKMS key to use for the encryption of secrets in etcd. Ex. + - "`projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key` + ." + returned: success + type: str + privateClusterConfig: + description: + - Configuration for a private cluster. + returned: success + type: complex + contains: + enablePrivateNodes: + description: + - Whether nodes have internal IP addresses only. 
If enabled, all nodes are + given only RFC 1918 private addresses and communicate with the master + via private networking. + returned: success + type: bool + enablePrivateEndpoint: + description: + - Whether the master's internal IP address is used as the cluster endpoint. + returned: success + type: bool + masterIpv4CidrBlock: + description: + - The IP range in CIDR notation to use for the hosted master network. This + range will be used for assigning internal IP addresses to the master or + set of masters, as well as the ILB VIP. This range must not overlap with + any other ranges in use within the cluster's network. + returned: success + type: str + privateEndpoint: + description: + - The internal IP address of this cluster's master endpoint. + returned: success + type: str + publicEndpoint: + description: + - The external IP address of this cluster's master endpoint. + returned: success + type: str + clusterIpv4Cidr: + description: + - The IP address range of the container pods in this cluster, in CIDR notation + (e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify + a /14 block in 10.0.0.0/8. + returned: success + type: str + enableTpu: + description: + - "(Optional) Whether to enable Cloud TPU resources in this cluster." + - See the official documentation - U(https://cloud.google.com/tpu/docs/kubernetes-engine-setup) + . + returned: success + type: bool + tpuIpv4CidrBlock: + description: + - The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + notation (e.g. `1.2.3.4/29`). + returned: success + type: str + addonsConfig: + description: + - Configurations for the various addons available to run in the cluster. + returned: success + type: complex + contains: + httpLoadBalancing: + description: + - Configuration for the HTTP (L7) load balancing controller addon, which + makes it easy to set up HTTP load balancers for services in a cluster. 
+ returned: success + type: complex + contains: + disabled: + description: + - Whether the HTTP Load Balancing controller is enabled in the cluster. + When enabled, it runs a small pod in the cluster that manages the + load balancers. + returned: success + type: bool + horizontalPodAutoscaling: + description: + - Configuration for the horizontal pod autoscaling feature, which increases + or decreases the number of replica pods a replication controller has based + on the resource usage of the existing pods. + returned: success + type: complex + contains: + disabled: + description: + - Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. + When enabled, it ensures that a Heapster pod is running in the cluster, + which is also used by the Cloud Monitoring service. + returned: success + type: bool + networkPolicyConfig: + description: + - Configuration for NetworkPolicy. This only tracks whether the addon is + enabled or not on the Master, it does not track whether network policy + is enabled for the nodes. + returned: success + type: complex + contains: + disabled: + description: + - Whether NetworkPolicy is enabled for this cluster. + returned: success + type: bool + subnetwork: + description: + - The name of the Google Compute Engine subnetwork to which the cluster is connected. + returned: success + type: str + locations: + description: + - The list of Google Compute Engine zones in which the cluster's nodes should + be located. + returned: success + type: list + resourceLabels: + description: + - The resource labels for the cluster to use to annotate any related Google + Compute Engine resources. + returned: success + type: dict + labelFingerprint: + description: + - The fingerprint of the set of labels for this cluster. + returned: success + type: str + legacyAbac: + description: + - Configuration for the legacy ABAC authorization mode. 
+ returned: success + type: complex + contains: + enabled: + description: + - Whether the ABAC authorizer is enabled for this cluster. When enabled, + identities in the system, including service accounts, nodes, and controllers, + will have statically granted permissions beyond those provided by the + RBAC configuration or IAM. + returned: success + type: bool + networkPolicy: + description: + - Configuration options for the NetworkPolicy feature. + returned: success + type: complex + contains: + provider: + description: + - The selected network policy provider. + returned: success + type: str + enabled: + description: + - Whether network policy is enabled on the cluster. + returned: success + type: bool + defaultMaxPodsConstraint: + description: + - The default constraint on the maximum number of pods that can be run simultaneously + on a node in the node pool of this cluster. + - Only honored if cluster created with IP Alias support. + returned: success + type: complex + contains: + maxPodsPerNode: + description: + - Constraint enforced on the max num of pods per node. + returned: success + type: str + ipAllocationPolicy: + description: + - Configuration for controlling how IPs are allocated in the cluster. + returned: success + type: complex + contains: + useIpAliases: + description: + - Whether alias IPs will be used for pod IPs in the cluster. + returned: success + type: bool + createSubnetwork: + description: + - Whether a new subnetwork will be created automatically for the cluster. + returned: success + type: bool + subnetworkName: + description: + - A custom subnetwork name to be used if createSubnetwork is true. + - If this field is empty, then an automatic name will be chosen for the + new subnetwork. + returned: success + type: str + clusterSecondaryRangeName: + description: + - The name of the secondary range to be used for the cluster CIDR block. + The secondary range will be used for pod IP addresses. 
+ - This must be an existing secondary range associated with the cluster subnetwork + . + returned: success + type: str + servicesSecondaryRangeName: + description: + - The name of the secondary range to be used for the services CIDR block. + The secondary range will be used for service ClusterIPs. This must be + an existing secondary range associated with the cluster subnetwork. + returned: success + type: str + clusterIpv4CidrBlock: + description: + - The IP address range for the cluster pod IPs. If this field is set, then + cluster.cluster_ipv4_cidr must be left blank. + - This field is only applicable when useIpAliases is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + nodeIpv4CidrBlock: + description: + - The IP address range of the instance IPs in this cluster. + - This is applicable only if createSubnetwork is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + servicesIpv4CidrBlock: + description: + - The IP address range of the services IPs in this cluster. If blank, a + range will be automatically chosen with the default size. + - This field is only applicable when useIpAliases is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + tpuIpv4CidrBlock: + description: + - The IP address range of the Cloud TPUs in this cluster. If unspecified, + a range will be automatically chosen with the default size. + - This field is only applicable when useIpAliases is true. + - If unspecified, the range will use the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. 
+ returned: success + type: str + endpoint: + description: + - The IP address of this cluster's master endpoint. + - The endpoint can be accessed from the internet at https://username:password@endpoint/ + See the masterAuth property of this resource for username and password information. + returned: success + type: str + initialClusterVersion: + description: + - The software version of the master endpoint and kubelets used in the cluster + when it was first created. The version can be upgraded over time. + returned: success + type: str + currentMasterVersion: + description: + - The current software version of the master endpoint. + returned: success + type: str + currentNodeVersion: + description: + - The current version of the node software components. If they are currently + at multiple versions because they're in the process of being upgraded, this + reflects the minimum version of all nodes. + returned: success + type: str + createTime: + description: + - The time the cluster was created, in RFC3339 text format. + returned: success + type: str + status: + description: + - The current status of this cluster. + returned: success + type: str + statusMessage: + description: + - Additional information about the current status of this cluster, if available. + returned: success + type: str + nodeIpv4CidrSize: + description: + - The size of the address space on each node for hosting containers. + - This is provisioned from within the container_ipv4_cidr range. + returned: success + type: int + servicesIpv4Cidr: + description: + - The IP address range of the Kubernetes services in this cluster, in CIDR notation + (e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from + the container CIDR. + returned: success + type: str + currentNodeCount: + description: + - The number of nodes currently in the cluster. + returned: success + type: int + expireTime: + description: + - The time the cluster will be automatically deleted in RFC3339 text format. 
+ returned: success + type: str + conditions: + description: + - Which conditions caused the current cluster state. + returned: success + type: complex + contains: + code: + description: + - Machine-friendly representation of the condition. + returned: success + type: str + message: + description: + - Human-friendly representation of the condition. + returned: success + type: str + masterAuthorizedNetworksConfig: + description: + - Configuration for controlling how IPs are allocated in the cluster. + returned: success + type: complex + contains: + enabled: + description: + - Whether or not master authorized networks is enabled. + returned: success + type: bool + cidrBlocks: + description: + - Define up to 50 external networks that could access Kubernetes master + through HTTPS. + returned: success + type: complex + contains: + displayName: + description: + - Optional field used to identify cidr blocks. + returned: success + type: str + cidrBlock: + description: + - Block specified in CIDR notation. + returned: success + type: str + nodePools: + description: + - Node pools belonging to this cluster. + returned: success + type: complex + contains: + name: + description: + - Name of the node pool. + returned: success + type: str + binaryAuthorization: + description: + - Configuration for the BinaryAuthorization feature. + returned: success + type: complex + contains: + enabled: + description: + - If enabled, all container images will be validated by Binary Authorization. + returned: success + type: bool + releaseChannel: + description: + - ReleaseChannel indicates which release channel a cluster is subscribed to. + - Release channels are arranged in order of risk and frequency of updates. + returned: success + type: complex + contains: + channel: + description: + - Which release channel the cluster is subscribed to. + returned: success + type: str + shieldedNodes: + description: + - Shielded Nodes configuration. 
+ returned: success + type: complex + contains: + enabled: + description: + - Whether Shielded Nodes features are enabled on all nodes in this cluster. + returned: success + type: bool + networkConfig: + description: + - Network configurations . + returned: success + type: complex + contains: + enableIntraNodeVisibility: + description: + - Whether Intra-node visibility is enabled for this cluster. This makes + same node pod to pod traffic visible for VPC network. + returned: success + type: bool + network: + description: + - The relative name of the Google Compute Engine network to which the cluster + is connected. + - 'Example: projects/my-project/global/networks/my-network .' + returned: success + type: str + subnetwork: + description: + - The relative name of the Google Compute Engine subnetwork to which the + cluster is connected. + - 'Example: projects/my-project/regions/us-central1/subnetworks/my-subnet + .' + returned: success + type: str + defaultSnatStatus: + description: + - Whether the cluster disables default in-node sNAT rules. In-node sNAT + rules will be disabled when defaultSnatStatus is disabled. + returned: success + type: bool + enableKubernetesAlpha: + description: + - Kubernetes alpha features are enabled on this cluster. This includes alpha + API groups (e.g. v1alpha1) and features that may not be production ready in + the kubernetes version of the master and nodes. + returned: success + type: bool + location: + description: + - The location where the cluster is deployed. 
+  returned: success
+  type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
+import json
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    # Entry point for the read-only info module: list all GKE clusters in the
+    # requested project/location and return them under 'resources'.
+    module = GcpModule(argument_spec=dict(location=dict(required=True, type='str', aliases=['region', 'zone'])))
+
+    # Default to the broad cloud-platform OAuth scope when the user gave none.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
+
+    return_value = {'resources': fetch_list(module, collection(module))}
+    module.exit_json(**return_value)
+
+
+def collection(module):
+    # URL of the GKE clusters collection for the configured project/location.
+    return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters".format(**module.params)
+
+
+def fetch_list(module, link):
+    # GET the collection and unwrap the paged 'clusters' array from the reply.
+    auth = GcpSession(module, 'container')
+    return auth.list(link, return_if_object, array_name='clusters')
+
+
+def return_if_object(module, response):
+    # Decode an API response into a dict; fail the module on HTTP or JSON
+    # errors, and return None when there is nothing to report (404/204).
+    # If not found, return nothing.
+    if response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    # A 2xx body can still carry an API-level error payload; surface it.
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_container_node_pool.py b/ansible_collections/google/cloud/plugins/modules/gcp_container_node_pool.py
new file mode 100644
index 000000000..82091a681
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_container_node_pool.py
@@ -0,0 +1,1218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_container_node_pool +description: +- NodePool contains the name and configuration for a cluster's node pool. +- Node pools are a set of nodes (i.e. VM's), with a common configuration and specification, + under the control of the cluster master. They may have a set of Kubernetes labels + applied to them, which may be used to reference them during pod scheduling. They + may also be resized up or down, to accommodate the workload. +short_description: Creates a GCP NodePool +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The name of the node pool. + required: false + type: str + config: + description: + - The node configuration of the pool. + required: false + type: dict + suboptions: + machine_type: + description: + - The name of a Google Compute Engine machine type (e.g. + - n1-standard-1). If unspecified, the default machine type is n1-standard-1. + required: false + type: str + disk_size_gb: + description: + - Size of the disk attached to each node, specified in GB. The smallest allowed + disk size is 10GB. If unspecified, the default disk size is 100GB. 
+ required: false + type: int + oauth_scopes: + description: + - The set of Google API scopes to be made available on all of the node VMs + under the "default" service account. + - 'The following scopes are recommended, but not required, and by default + are not included: U(https://www.googleapis.com/auth/compute) is required + for mounting persistent storage on your nodes.' + - U(https://www.googleapis.com/auth/devstorage.read_only) is required for + communicating with gcr.io (the Google Container Registry). + - If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring + are enabled, in which case their required scopes will be added. + elements: str + required: false + type: list + service_account: + description: + - The Google Cloud Platform Service Account to be used by the node VMs. If + no Service Account is specified, the "default" service account is used. + required: false + type: str + metadata: + description: + - The metadata key/value pairs assigned to instances in the cluster. + - 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + in length. These are reflected as part of a URL in the metadata server. + Additionally, to avoid ambiguity, keys must not conflict with any other + metadata keys for the project or be one of the four reserved keys: "instance-template", + "kube-env", "startup-script", and "user-data" Values are free-form strings, + and only have meaning as interpreted by the image running in the instance. + The only restriction placed on them is that each value''s size must be less + than or equal to 32 KB.' + - The total size of all keys and values must be less than 512 KB. + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + required: false + type: dict + image_type: + description: + - The image type to use for this node. Note that for a given image type, the + latest version of it will be used. 
+        required: false
+        type: str
+      labels:
+        description:
+        - 'The map of Kubernetes labels (key/value pairs) to be applied to each node.
+          These will be added in addition to any default label(s) that Kubernetes may
+          apply to the node. In case of conflict in label keys, the applied set may
+          differ depending on the Kubernetes version -- it''s best to assume the behavior
+          is undefined and conflicts should be avoided. For more information, including
+          usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html)
+          An object containing a list of "key": value pairs.'
+        - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
+        required: false
+        type: dict
+      local_ssd_count:
+        description:
+        - The number of local SSD disks to be attached to the node.
+        - 'The limit for this value is dependent upon the maximum number of disks
+          available on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits)
+          for more information.'
+        required: false
+        type: int
+      tags:
+        description:
+        - The list of instance tags applied to all nodes. Tags are used to identify
+          valid sources or targets for network firewalls and are specified by the
+          client during cluster or node pool creation. Each tag within the list must
+          comply with RFC1035.
+        elements: str
+        required: false
+        type: list
+      preemptible:
+        description:
+        - 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible)
+          for more information about preemptible VM instances.'
+        required: false
+        type: bool
+      accelerators:
+        description:
+        - A list of hardware accelerators to be attached to each node.
+        elements: dict
+        required: false
+        type: list
+        suboptions:
+          accelerator_count:
+            description:
+            - The number of the accelerator cards exposed to an instance.
+            required: false
+            type: int
+          accelerator_type:
+            description:
+            - The accelerator type resource name.
+ required: false + type: str + disk_type: + description: + - Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') + If unspecified, the default disk type is 'pd-standard' . + required: false + type: str + min_cpu_platform: + description: + - Minimum CPU platform to be used by this instance. The instance may be scheduled + on the specified or newer CPU platform . + required: false + type: str + taints: + description: + - List of kubernetes taints to be applied to each node. + elements: dict + required: false + type: list + suboptions: + key: + description: + - Key for taint. + required: false + type: str + value: + description: + - Value for taint. + required: false + type: str + effect: + description: + - Effect for taint. + required: false + type: str + shielded_instance_config: + description: + - Shielded Instance options. + required: false + type: dict + suboptions: + enable_secure_boot: + description: + - Defines whether the instance has Secure Boot enabled. + - Secure Boot helps ensure that the system only runs authentic software + by verifying the digital signature of all boot components, and halting + the boot process if signature verification fails. + required: false + type: bool + enable_integrity_monitoring: + description: + - Defines whether the instance has integrity monitoring enabled. + - Enables monitoring and attestation of the boot integrity of the instance. + - The attestation is performed against the integrity policy baseline. + This baseline is initially derived from the implicitly trusted boot + image when the instance is created. + required: false + type: bool + workload_meta_config: + description: + - WorkloadMetadataConfig defines the metadata configuration to expose to workloads + on the node pool. + required: false + type: dict + suboptions: + mode: + description: + - Mode is the configuration for how to expose metadata to workloads running + on the node pool. 
+            - 'Some valid choices include: "GCE_METADATA", "GKE_METADATA"'
+            required: false
+            type: str
+  initial_node_count:
+    description:
+    - The initial node count for the pool. You must ensure that your Compute Engine
+      resource quota is sufficient for this number of instances. You must also have
+      available firewall and routes quota.
+    required: true
+    type: int
+  version:
+    description:
+    - The version of the Kubernetes of this node.
+    required: false
+    type: str
+  autoscaling:
+    description:
+    - Autoscaler configuration for this NodePool. Autoscaler is enabled only if a
+      valid configuration is present.
+    required: false
+    type: dict
+    suboptions:
+      enabled:
+        description:
+        - Is autoscaling enabled for this node pool.
+        required: false
+        type: bool
+      min_node_count:
+        description:
+        - Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount.
+        required: false
+        type: int
+      max_node_count:
+        description:
+        - Maximum number of nodes in the NodePool. Must be >= minNodeCount.
+        - There has to be enough quota to scale up the cluster.
+        required: false
+        type: int
+  management:
+    description:
+    - Management configuration for this NodePool.
+    required: false
+    type: dict
+    suboptions:
+      auto_upgrade:
+        description:
+        - A flag that specifies whether node auto-upgrade is enabled for the node
+          pool. If enabled, node auto-upgrade helps keep the nodes in your node pool
+          up to date with the latest release version of Kubernetes.
+        required: false
+        type: bool
+      auto_repair:
+        description:
+        - A flag that specifies whether the node auto-repair is enabled for the node
+          pool. If enabled, the nodes in this node pool will be monitored and, if
+          they fail health checks too many times, an automatic repair action will
+          be triggered.
+        required: false
+        type: bool
+      upgrade_options:
+        description:
+        - Specifies the Auto Upgrade knobs for the node pool.
+ required: false + type: dict + suboptions: {} + max_pods_constraint: + description: + - The constraint on the maximum number of pods that can be run simultaneously + on a node in the node pool. + required: false + type: dict + suboptions: + max_pods_per_node: + description: + - Constraint enforced on the max num of pods per node. + required: false + type: int + conditions: + description: + - Which conditions caused the current node pool state. + elements: dict + required: false + type: list + suboptions: + code: + description: + - Machine-friendly representation of the condition. + - 'Some valid choices include: "UNKNOWN", "GCE_STOCKOUT", "GKE_SERVICE_ACCOUNT_DELETED", + "GCE_QUOTA_EXCEEDED", "SET_BY_OPERATOR"' + required: false + type: str + cluster: + description: + - The cluster this node pool belongs to. + - 'This field represents a link to a Cluster resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_container_cluster task and then set this cluster field to "{{ name-of-resource + }}"' + required: true + type: dict + location: + description: + - The location where the node pool is deployed. + required: true + type: str + aliases: + - region + - zone + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a cluster + google.cloud.gcp_container_cluster: + name: cluster-nodepool + initial_node_count: 4 + location: us-central1-a + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: cluster + +- name: create a node pool + google.cloud.gcp_container_node_pool: + name: my-pool + initial_node_count: 4 + cluster: "{{ cluster }}" + location: us-central1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The name of the node pool. + returned: success + type: str +config: + description: + - The node configuration of the pool. + returned: success + type: complex + contains: + machineType: + description: + - The name of a Google Compute Engine machine type (e.g. + - n1-standard-1). If unspecified, the default machine type is n1-standard-1. + returned: success + type: str + diskSizeGb: + description: + - Size of the disk attached to each node, specified in GB. The smallest allowed + disk size is 10GB. If unspecified, the default disk size is 100GB. + returned: success + type: int + oauthScopes: + description: + - The set of Google API scopes to be made available on all of the node VMs under + the "default" service account. 
+ - 'The following scopes are recommended, but not required, and by default are + not included: U(https://www.googleapis.com/auth/compute) is required for mounting + persistent storage on your nodes.' + - U(https://www.googleapis.com/auth/devstorage.read_only) is required for communicating + with gcr.io (the Google Container Registry). + - If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring + are enabled, in which case their required scopes will be added. + returned: success + type: list + serviceAccount: + description: + - The Google Cloud Platform Service Account to be used by the node VMs. If no + Service Account is specified, the "default" service account is used. + returned: success + type: str + metadata: + description: + - The metadata key/value pairs assigned to instances in the cluster. + - 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + in length. These are reflected as part of a URL in the metadata server. Additionally, + to avoid ambiguity, keys must not conflict with any other metadata keys for + the project or be one of the four reserved keys: "instance-template", "kube-env", + "startup-script", and "user-data" Values are free-form strings, and only have + meaning as interpreted by the image running in the instance. The only restriction + placed on them is that each value''s size must be less than or equal to 32 + KB.' + - The total size of all keys and values must be less than 512 KB. + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + imageType: + description: + - The image type to use for this node. Note that for a given image type, the + latest version of it will be used. + returned: success + type: str + labels: + description: + - 'The map of Kubernetes labels (key/value pairs) to be applied to each node. 
+ These will added in addition to any default label(s) that Kubernetes may apply + to the node. In case of conflict in label keys, the applied set may differ + depending on the Kubernetes version -- it''s best to assume the behavior is + undefined and conflicts should be avoided. For more information, including + usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html) + An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + localSsdCount: + description: + - The number of local SSD disks to be attached to the node. + - 'The limit for this value is dependant upon the maximum number of disks available + on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits) + for more information.' + returned: success + type: int + tags: + description: + - The list of instance tags applied to all nodes. Tags are used to identify + valid sources or targets for network firewalls and are specified by the client + during cluster or node pool creation. Each tag within the list must comply + with RFC1035. + returned: success + type: list + preemptible: + description: + - 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible) + for more information about preemptible VM instances.' + returned: success + type: bool + accelerators: + description: + - A list of hardware accelerators to be attached to each node. + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of the accelerator cards exposed to an instance. + returned: success + type: int + acceleratorType: + description: + - The accelerator type resource name. + returned: success + type: str + diskType: + description: + - Type of the disk attached to each node (e.g. 
'pd-standard' or 'pd-ssd') If + unspecified, the default disk type is 'pd-standard' . + returned: success + type: str + minCpuPlatform: + description: + - Minimum CPU platform to be used by this instance. The instance may be scheduled + on the specified or newer CPU platform . + returned: success + type: str + taints: + description: + - List of kubernetes taints to be applied to each node. + returned: success + type: complex + contains: + key: + description: + - Key for taint. + returned: success + type: str + value: + description: + - Value for taint. + returned: success + type: str + effect: + description: + - Effect for taint. + returned: success + type: str + shieldedInstanceConfig: + description: + - Shielded Instance options. + returned: success + type: complex + contains: + enableSecureBoot: + description: + - Defines whether the instance has Secure Boot enabled. + - Secure Boot helps ensure that the system only runs authentic software + by verifying the digital signature of all boot components, and halting + the boot process if signature verification fails. + returned: success + type: bool + enableIntegrityMonitoring: + description: + - Defines whether the instance has integrity monitoring enabled. + - Enables monitoring and attestation of the boot integrity of the instance. + - The attestation is performed against the integrity policy baseline. This + baseline is initially derived from the implicitly trusted boot image when + the instance is created. + returned: success + type: bool + workloadMetaConfig: + description: + - WorkloadMetadataConfig defines the metadata configuration to expose to workloads + on the node pool. + returned: success + type: complex + contains: + mode: + description: + - Mode is the configuration for how to expose metadata to workloads running + on the node pool. + returned: success + type: str +initialNodeCount: + description: + - The initial node count for the pool. 
You must ensure that your Compute Engine + resource quota is sufficient for this number of instances. You must also have + available firewall and routes quota. + returned: success + type: int +status: + description: + - Status of nodes in this pool instance. + returned: success + type: str +statusMessage: + description: + - Additional information about the current status of this node pool instance. + returned: success + type: str +version: + description: + - The version of the Kubernetes of this node. + returned: success + type: str +autoscaling: + description: + - Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid + configuration is present. + returned: success + type: complex + contains: + enabled: + description: + - Is autoscaling enabled for this node pool. + returned: success + type: bool + minNodeCount: + description: + - Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount. + returned: success + type: int + maxNodeCount: + description: + - Maximum number of nodes in the NodePool. Must be >= minNodeCount. + - There has to enough quota to scale up the cluster. + returned: success + type: int +management: + description: + - Management configuration for this NodePool. + returned: success + type: complex + contains: + autoUpgrade: + description: + - A flag that specifies whether node auto-upgrade is enabled for the node pool. + If enabled, node auto-upgrade helps keep the nodes in your node pool up to + date with the latest release version of Kubernetes. + returned: success + type: bool + autoRepair: + description: + - A flag that specifies whether the node auto-repair is enabled for the node + pool. If enabled, the nodes in this node pool will be monitored and, if they + fail health checks too many times, an automatic repair action will be triggered. + returned: success + type: bool + upgradeOptions: + description: + - Specifies the Auto Upgrade knobs for the node pool. 
+ returned: success + type: complex + contains: + autoUpgradeStartTime: + description: + - This field is set when upgrades are about to commence with the approximate + start time for the upgrades, in RFC3339 text format. + returned: success + type: str + description: + description: + - This field is set when upgrades are about to commence with the description + of the upgrade. + returned: success + type: str +maxPodsConstraint: + description: + - The constraint on the maximum number of pods that can be run simultaneously on + a node in the node pool. + returned: success + type: complex + contains: + maxPodsPerNode: + description: + - Constraint enforced on the max num of pods per node. + returned: success + type: int +conditions: + description: + - Which conditions caused the current node pool state. + returned: success + type: complex + contains: + code: + description: + - Machine-friendly representation of the condition. + returned: success + type: str +podIpv4CidrSize: + description: + - The pod CIDR block size per node in this node pool. + returned: success + type: int +cluster: + description: + - The cluster this node pool belongs to. + returned: success + type: dict +location: + description: + - The location where the node pool is deployed. 
+  returned: success
+  type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
+    navigate_hash,
+    GcpSession,
+    GcpModule,
+    GcpRequest,
+    remove_nones_from_dict,
+    replace_resource_dict,
+)
+import json
+import time
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    """Main function"""
+
+    module = GcpModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            name=dict(type='str'),
+            config=dict(
+                type='dict',
+                options=dict(
+                    machine_type=dict(type='str'),
+                    disk_size_gb=dict(type='int'),
+                    oauth_scopes=dict(type='list', elements='str'),
+                    service_account=dict(type='str'),
+                    metadata=dict(type='dict'),
+                    image_type=dict(type='str'),
+                    labels=dict(type='dict'),
+                    local_ssd_count=dict(type='int'),
+                    tags=dict(type='list', elements='str'),
+                    preemptible=dict(type='bool'),
+                    accelerators=dict(type='list', elements='dict', options=dict(accelerator_count=dict(type='int'), accelerator_type=dict(type='str'))),
+                    disk_type=dict(type='str'),
+                    min_cpu_platform=dict(type='str'),
+                    taints=dict(type='list', elements='dict', options=dict(key=dict(type='str'), value=dict(type='str'), effect=dict(type='str'))),
+                    shielded_instance_config=dict(
+                        type='dict', options=dict(enable_secure_boot=dict(type='bool'), enable_integrity_monitoring=dict(type='bool'))
+                    ),
+                    workload_meta_config=dict(type='dict', options=dict(mode=dict(type='str'))),
+                ),
+            ),
+            initial_node_count=dict(required=True, type='int'),
+            version=dict(type='str'),
+            autoscaling=dict(type='dict', options=dict(enabled=dict(type='bool'), min_node_count=dict(type='int'), max_node_count=dict(type='int'))),
+            management=dict(
+                type='dict', options=dict(auto_upgrade=dict(type='bool'), auto_repair=dict(type='bool'), upgrade_options=dict(type='dict', options=dict()))
+            ),
+            max_pods_constraint=dict(type='dict', options=dict(max_pods_per_node=dict(type='int'))),
+            conditions=dict(type='list', elements='dict', options=dict(code=dict(type='str'))),
+            cluster=dict(required=True, type='dict'),
+            location=dict(required=True, type='str', aliases=['region', 'zone']),
+        )
+    )
+
+    # Default to the broad cloud-platform OAuth scope when the user gave none.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
+
+    state = module.params['state']
+
+    # Read the remote resource once; its presence/absence plus the requested
+    # state drives the create/update/delete decision below.
+    fetch = fetch_resource(module, self_link(module))
+    changed = False
+
+    if fetch:
+        if state == 'present':
+            if is_different(module, fetch):
+                update(module, self_link(module))
+                fetch = fetch_resource(module, self_link(module))
+                changed = True
+        else:
+            delete(module, self_link(module))
+            fetch = {}
+            changed = True
+    else:
+        if state == 'present':
+            fetch = create(module, collection(module))
+            changed = True
+        else:
+            fetch = {}
+
+    fetch.update({'changed': changed})
+
+    module.exit_json(**fetch)
+
+
+def create(module, link):
+    # POST the node pool to the collection URL and block on the operation.
+    auth = GcpSession(module, 'container')
+    return wait_for_operation(module, auth.post(link, resource_to_request(module)))
+
+
+def update(module, link):
+    # PUT the full desired state to the resource URL and block on the operation.
+    auth = GcpSession(module, 'container')
+    return wait_for_operation(module, auth.put(link, resource_to_request(module)))
+
+
+def delete(module, link):
+    # DELETE the resource and block until the operation finishes.
+    auth = GcpSession(module, 'container')
+    return wait_for_operation(module, auth.delete(link))
+
+
+def resource_to_request(module):
+    # Translate Ansible snake_case params into the API's camelCase request body.
+    request = {
+        u'name': module.params.get('name'),
+        u'config': NodePoolConfig(module.params.get('config', {}), module).to_request(),
+        u'initialNodeCount': module.params.get('initial_node_count'),
+        u'version': module.params.get('version'),
+        u'autoscaling': NodePoolAutoscaling(module.params.get('autoscaling', {}), module).to_request(),
+        u'management': NodePoolManagement(module.params.get('management', {}), module).to_request(),
+        u'maxPodsConstraint': NodePoolMaxpodsconstraint(module.params.get('max_pods_constraint', {}), module).to_request(),
+        u'conditions': NodePoolConditionsArray(module.params.get('conditions', []), module).to_request(),
+    }
+    request = encode_request(request, module)
+    # Drop empty/None values, but keep explicit booleans (False is meaningful).
+    return_vals = {}
+    for k, v in request.items():
+        if v or v is False:
+            return_vals[k] = v
+
+    return return_vals
+
+
+def fetch_resource(module, link, allow_not_found=True):
+    # GET the resource; returns None on 404 when allow_not_found is True.
+    auth = GcpSession(module, 'container')
+    return return_if_object(module, auth.get(link), allow_not_found)
+
+
+def self_link(module):
+    # URL of this specific node pool; 'cluster' may be a dict from a registered
+    # gcp_container_cluster result, so resolve it to its name first.
+    res = {
+        'project': module.params['project'],
+        'location': module.params['location'],
+        'cluster': replace_resource_dict(module.params['cluster'], 'name'),
+        'name': module.params['name'],
+    }
+    return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{name}".format(**res)
+
+
+def collection(module):
+    # URL of the node-pool collection inside the owning cluster.
+    res = {'project': module.params['project'], 'location': module.params['location'], 'cluster': replace_resource_dict(module.params['cluster'], 'name')}
+    return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters/{cluster}/nodePools".format(**res)
+
+
+def return_if_object(module, response, allow_not_found=False):
+    # Decode an API response into a dict; fail the module on HTTP or JSON
+    # errors, return None for 404 (when tolerated) and 204 responses.
+    # If not found, return nothing.
+    if allow_not_found and response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError):
+        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
+
+    # A 2xx body can still carry an API-level error payload; surface it.
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+def is_different(module, response):
+    # Compare desired (params) vs actual (API response) on their shared keys
+    # only, so output-only and unset fields never trigger a spurious update.
+    request = resource_to_request(module)
+    response = response_to_hash(module, response)
+
+    # Remove all output-only from response.
+    response_vals = {}
+    for k, v in response.items():
+        if k in request:
+            response_vals[k] = v
+
+    request_vals = {}
+    for k, v in request.items():
+        if k in response:
+            request_vals[k] = v
+
+    return GcpRequest(request_vals) != GcpRequest(response_vals)
+
+
+# Remove unnecessary properties from the response.
+# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response):
+    # NOTE: initialNodeCount and version are taken from module params, not the
+    # response, so they never register as drift.
+    return {
+        u'name': response.get(u'name'),
+        u'config': NodePoolConfig(response.get(u'config', {}), module).from_response(),
+        u'initialNodeCount': module.params.get('initial_node_count'),
+        u'status': response.get(u'status'),
+        u'statusMessage': response.get(u'statusMessage'),
+        u'version': module.params.get('version'),
+        u'autoscaling': NodePoolAutoscaling(response.get(u'autoscaling', {}), module).from_response(),
+        u'management': NodePoolManagement(response.get(u'management', {}), module).from_response(),
+        u'maxPodsConstraint': NodePoolMaxpodsconstraint(response.get(u'maxPodsConstraint', {}), module).from_response(),
+        u'conditions': NodePoolConditionsArray(response.get(u'conditions', []), module).from_response(),
+        u'podIpv4CidrSize': response.get(u'podIpv4CidrSize'),
+    }
+
+
+def async_op_url(module, extra_data=None):
+    # Build the operations polling URL; module params take precedence over
+    # extra_data (which supplies op_id).
+    if extra_data is None:
+        extra_data = {}
+    url = "https://container.googleapis.com/v1/projects/{project}/locations/{location}/operations/{op_id}"
+    combined = extra_data.copy()
+    combined.update(module.params)
+    return url.format(**combined)
+
+
+def wait_for_operation(module, response):
+    # Resolve a mutation response to the final resource: poll the returned
+    # operation until DONE, then fetch its targetLink.
+    op_result = return_if_object(module, response)
+    if op_result is None:
+        return {}
+    status = navigate_hash(op_result, ['status'])
+    wait_done = wait_for_completion(status, op_result, module)
+    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']))
+
+
+def wait_for_completion(status, op_result, module):
+    # Poll the operation once per second until its status is DONE, failing
+    # the module if the operation reports errors along the way.
+    op_id = navigate_hash(op_result, ['name'])
+    op_uri = async_op_url(module, {'op_id': op_id})
+    while status != 'DONE':
+        raise_if_errors(op_result, ['error', 'errors'], module)
+        time.sleep(1.0)
+        op_result = fetch_resource(module, op_uri, False)
+        status = navigate_hash(op_result, ['status'])
+    return op_result
+
+
+def raise_if_errors(response, err_path, module):
+    # Fail the module if an error payload exists at err_path in the response.
+    errors = navigate_hash(response, err_path)
+    if errors is not None:
+        module.fail_json(msg=errors)
+
+
+# Google Container Engine API has its own layout for the create method,
+# defined like this:
+#
+# {
+#    'nodePool': {
+#         ... 
# Google Container Engine API has its own layout for the create method,
# defined like this:
#
# {
#   'nodePool': {
#     ... node pool data
#   }
# }
#
# Format the request to match the expected input by the API
def encode_request(resource_request, module):
    """Wrap the resource body in the 'nodePool' envelope the API expects."""
    return {'nodePool': resource_request}


class NodePoolConfig(object):
    """(De)serializer for the node pool `config` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Ansible snake_case -> API camelCase.
        return remove_nones_from_dict(
            {
                u'machineType': self.request.get('machine_type'),
                u'diskSizeGb': self.request.get('disk_size_gb'),
                u'oauthScopes': self.request.get('oauth_scopes'),
                u'serviceAccount': self.request.get('service_account'),
                u'metadata': self.request.get('metadata'),
                u'imageType': self.request.get('image_type'),
                u'labels': self.request.get('labels'),
                u'localSsdCount': self.request.get('local_ssd_count'),
                u'tags': self.request.get('tags'),
                u'preemptible': self.request.get('preemptible'),
                u'accelerators': NodePoolAcceleratorsArray(self.request.get('accelerators', []), self.module).to_request(),
                u'diskType': self.request.get('disk_type'),
                u'minCpuPlatform': self.request.get('min_cpu_platform'),
                u'taints': NodePoolTaintsArray(self.request.get('taints', []), self.module).to_request(),
                u'shieldedInstanceConfig': NodePoolShieldedinstanceconfig(self.request.get('shielded_instance_config', {}), self.module).to_request(),
                u'workloadMetaConfig': NodePoolWorkloadmetaconfig(self.request.get('workload_meta_config', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        # API camelCase passed straight through for comparison.
        return remove_nones_from_dict(
            {
                u'machineType': self.request.get(u'machineType'),
                u'diskSizeGb': self.request.get(u'diskSizeGb'),
                u'oauthScopes': self.request.get(u'oauthScopes'),
                u'serviceAccount': self.request.get(u'serviceAccount'),
                u'metadata': self.request.get(u'metadata'),
                u'imageType': self.request.get(u'imageType'),
                u'labels': self.request.get(u'labels'),
                u'localSsdCount': self.request.get(u'localSsdCount'),
                u'tags': self.request.get(u'tags'),
                u'preemptible': self.request.get(u'preemptible'),
                u'accelerators': NodePoolAcceleratorsArray(self.request.get(u'accelerators', []), self.module).from_response(),
                u'diskType': self.request.get(u'diskType'),
                u'minCpuPlatform': self.request.get(u'minCpuPlatform'),
                u'taints': NodePoolTaintsArray(self.request.get(u'taints', []), self.module).from_response(),
                u'shieldedInstanceConfig': NodePoolShieldedinstanceconfig(self.request.get(u'shieldedInstanceConfig', {}), self.module).from_response(),
                u'workloadMetaConfig': NodePoolWorkloadmetaconfig(self.request.get(u'workloadMetaConfig', {}), self.module).from_response(),
            }
        )


class NodePoolAcceleratorsArray(object):
    """(De)serializer for the repeated `accelerators` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'acceleratorCount': item.get('accelerator_count'), u'acceleratorType': item.get('accelerator_type')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'acceleratorCount': item.get(u'acceleratorCount'), u'acceleratorType': item.get(u'acceleratorType')})


class NodePoolTaintsArray(object):
    """(De)serializer for the repeated `taints` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'key': item.get('key'), u'value': item.get('value'), u'effect': item.get('effect')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'key': item.get(u'key'), u'value': item.get(u'value'), u'effect': item.get(u'effect')})


class NodePoolShieldedinstanceconfig(object):
    """(De)serializer for the `shielded_instance_config` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {u'enableSecureBoot': self.request.get('enable_secure_boot'), u'enableIntegrityMonitoring': self.request.get('enable_integrity_monitoring')}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {u'enableSecureBoot': self.request.get(u'enableSecureBoot'), u'enableIntegrityMonitoring': self.request.get(u'enableIntegrityMonitoring')}
        )


class NodePoolWorkloadmetaconfig(object):
    """(De)serializer for the `workload_meta_config` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({u'mode': self.request.get('mode')})

    def from_response(self):
        return remove_nones_from_dict({u'mode': self.request.get(u'mode')})


class NodePoolAutoscaling(object):
    """(De)serializer for the `autoscaling` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {u'enabled': self.request.get('enabled'), u'minNodeCount': self.request.get('min_node_count'), u'maxNodeCount': self.request.get('max_node_count')}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {u'enabled': self.request.get(u'enabled'), u'minNodeCount': self.request.get(u'minNodeCount'), u'maxNodeCount': self.request.get(u'maxNodeCount')}
        )


class NodePoolManagement(object):
    """(De)serializer for the `management` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'autoUpgrade': self.request.get('auto_upgrade'),
                u'autoRepair': self.request.get('auto_repair'),
                u'upgradeOptions': NodePoolUpgradeoptions(self.request.get('upgrade_options', {}), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'autoUpgrade': self.request.get(u'autoUpgrade'),
                u'autoRepair': self.request.get(u'autoRepair'),
                u'upgradeOptions': NodePoolUpgradeoptions(self.request.get(u'upgradeOptions', {}), self.module).from_response(),
            }
        )


class NodePoolUpgradeoptions(object):
    """Placeholder (de)serializer: all upgrade-option fields are output-only."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({})

    def from_response(self):
        return remove_nones_from_dict({})


class NodePoolMaxpodsconstraint(object):
    """(De)serializer for the `max_pods_constraint` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({u'maxPodsPerNode': self.request.get('max_pods_per_node')})

    def from_response(self):
        return remove_nones_from_dict({u'maxPodsPerNode': self.request.get(u'maxPodsPerNode')})


class NodePoolConditionsArray(object):
    """(De)serializer for the repeated `conditions` sub-object."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'code': item.get('code')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'code': item.get(u'code')})


if __name__ == '__main__':
    main()
b/ansible_collections/google/cloud/plugins/modules/gcp_container_node_pool_info.py new file mode 100644 index 000000000..03c7ccda3 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_container_node_pool_info.py @@ -0,0 +1,487 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_container_node_pool_info +description: +- Gather info for GCP NodePool +short_description: Gather info for GCP NodePool +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + location: + description: + - The location where the node pool is deployed. + required: true + type: str + aliases: + - region + - zone + cluster: + description: + - The cluster this node pool belongs to. + - 'This field represents a link to a Cluster resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_container_cluster task and then set this cluster field to "{{ name-of-resource + }}"' + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. 
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a node pool + gcp_container_node_pool_info: + cluster: "{{ cluster }}" + location: us-central1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of the node pool. + returned: success + type: str + config: + description: + - The node configuration of the pool. + returned: success + type: complex + contains: + machineType: + description: + - The name of a Google Compute Engine machine type (e.g. + - n1-standard-1). If unspecified, the default machine type is n1-standard-1. + returned: success + type: str + diskSizeGb: + description: + - Size of the disk attached to each node, specified in GB. The smallest + allowed disk size is 10GB. If unspecified, the default disk size is 100GB. + returned: success + type: int + oauthScopes: + description: + - The set of Google API scopes to be made available on all of the node VMs + under the "default" service account. + - 'The following scopes are recommended, but not required, and by default + are not included: U(https://www.googleapis.com/auth/compute) is required + for mounting persistent storage on your nodes.' + - U(https://www.googleapis.com/auth/devstorage.read_only) is required for + communicating with gcr.io (the Google Container Registry). + - If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring + are enabled, in which case their required scopes will be added. + returned: success + type: list + serviceAccount: + description: + - The Google Cloud Platform Service Account to be used by the node VMs. + If no Service Account is specified, the "default" service account is used. 
+ returned: success + type: str + metadata: + description: + - The metadata key/value pairs assigned to instances in the cluster. + - 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + in length. These are reflected as part of a URL in the metadata server. + Additionally, to avoid ambiguity, keys must not conflict with any other + metadata keys for the project or be one of the four reserved keys: "instance-template", + "kube-env", "startup-script", and "user-data" Values are free-form strings, + and only have meaning as interpreted by the image running in the instance. + The only restriction placed on them is that each value''s size must be + less than or equal to 32 KB.' + - The total size of all keys and values must be less than 512 KB. + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + imageType: + description: + - The image type to use for this node. Note that for a given image type, + the latest version of it will be used. + returned: success + type: str + labels: + description: + - 'The map of Kubernetes labels (key/value pairs) to be applied to each + node. These will added in addition to any default label(s) that Kubernetes + may apply to the node. In case of conflict in label keys, the applied + set may differ depending on the Kubernetes version -- it''s best to assume + the behavior is undefined and conflicts should be avoided. For more information, + including usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html) + An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + localSsdCount: + description: + - The number of local SSD disks to be attached to the node. + - 'The limit for this value is dependant upon the maximum number of disks + available on a machine per zone. 
See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits) + for more information.' + returned: success + type: int + tags: + description: + - The list of instance tags applied to all nodes. Tags are used to identify + valid sources or targets for network firewalls and are specified by the + client during cluster or node pool creation. Each tag within the list + must comply with RFC1035. + returned: success + type: list + preemptible: + description: + - 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible) + for more information about preemptible VM instances.' + returned: success + type: bool + accelerators: + description: + - A list of hardware accelerators to be attached to each node. + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of the accelerator cards exposed to an instance. + returned: success + type: int + acceleratorType: + description: + - The accelerator type resource name. + returned: success + type: str + diskType: + description: + - Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') + If unspecified, the default disk type is 'pd-standard' . + returned: success + type: str + minCpuPlatform: + description: + - Minimum CPU platform to be used by this instance. The instance may be + scheduled on the specified or newer CPU platform . + returned: success + type: str + taints: + description: + - List of kubernetes taints to be applied to each node. + returned: success + type: complex + contains: + key: + description: + - Key for taint. + returned: success + type: str + value: + description: + - Value for taint. + returned: success + type: str + effect: + description: + - Effect for taint. + returned: success + type: str + shieldedInstanceConfig: + description: + - Shielded Instance options. 
+ returned: success + type: complex + contains: + enableSecureBoot: + description: + - Defines whether the instance has Secure Boot enabled. + - Secure Boot helps ensure that the system only runs authentic software + by verifying the digital signature of all boot components, and halting + the boot process if signature verification fails. + returned: success + type: bool + enableIntegrityMonitoring: + description: + - Defines whether the instance has integrity monitoring enabled. + - Enables monitoring and attestation of the boot integrity of the instance. + - The attestation is performed against the integrity policy baseline. + This baseline is initially derived from the implicitly trusted boot + image when the instance is created. + returned: success + type: bool + workloadMetaConfig: + description: + - WorkloadMetadataConfig defines the metadata configuration to expose to + workloads on the node pool. + returned: success + type: complex + contains: + mode: + description: + - Mode is the configuration for how to expose metadata to workloads + running on the node pool. + returned: success + type: str + initialNodeCount: + description: + - The initial node count for the pool. You must ensure that your Compute Engine + resource quota is sufficient for this number of instances. You must also have + available firewall and routes quota. + returned: success + type: int + status: + description: + - Status of nodes in this pool instance. + returned: success + type: str + statusMessage: + description: + - Additional information about the current status of this node pool instance. + returned: success + type: str + version: + description: + - The version of the Kubernetes of this node. + returned: success + type: str + autoscaling: + description: + - Autoscaler configuration for this NodePool. Autoscaler is enabled only if + a valid configuration is present. 
+ returned: success + type: complex + contains: + enabled: + description: + - Is autoscaling enabled for this node pool. + returned: success + type: bool + minNodeCount: + description: + - Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount. + returned: success + type: int + maxNodeCount: + description: + - Maximum number of nodes in the NodePool. Must be >= minNodeCount. + - There has to enough quota to scale up the cluster. + returned: success + type: int + management: + description: + - Management configuration for this NodePool. + returned: success + type: complex + contains: + autoUpgrade: + description: + - A flag that specifies whether node auto-upgrade is enabled for the node + pool. If enabled, node auto-upgrade helps keep the nodes in your node + pool up to date with the latest release version of Kubernetes. + returned: success + type: bool + autoRepair: + description: + - A flag that specifies whether the node auto-repair is enabled for the + node pool. If enabled, the nodes in this node pool will be monitored and, + if they fail health checks too many times, an automatic repair action + will be triggered. + returned: success + type: bool + upgradeOptions: + description: + - Specifies the Auto Upgrade knobs for the node pool. + returned: success + type: complex + contains: + autoUpgradeStartTime: + description: + - This field is set when upgrades are about to commence with the approximate + start time for the upgrades, in RFC3339 text format. + returned: success + type: str + description: + description: + - This field is set when upgrades are about to commence with the description + of the upgrade. + returned: success + type: str + maxPodsConstraint: + description: + - The constraint on the maximum number of pods that can be run simultaneously + on a node in the node pool. + returned: success + type: complex + contains: + maxPodsPerNode: + description: + - Constraint enforced on the max num of pods per node. 
################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json

################################################################################
# Main
################################################################################


def main():
    """Entry point: list all node pools of the given cluster and exit."""
    module = GcpModule(argument_spec=dict(location=dict(required=True, type='str', aliases=['region', 'zone']), cluster=dict(required=True, type='dict')))

    # Default to the broadest Cloud scope when the caller supplies none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    module.exit_json(resources=fetch_list(module, collection(module)))


def collection(module):
    """Build the REST URL of the nodePools collection for the cluster."""
    substitutions = {
        'project': module.params['project'],
        'location': module.params['location'],
        'cluster': replace_resource_dict(module.params['cluster'], 'name'),
    }
    return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters/{cluster}/nodePools".format(**substitutions)


def fetch_list(module, link):
    """Page through *link*, accumulating entries from the 'nodePools' array."""
    session = GcpSession(module, 'container')
    return session.list(link, return_if_object, array_name='nodePools')


def return_if_object(module, response):
    """Decode *response*, returning None for 404/204; fail the module on errors."""
    if response.status_code == 404:
        return None
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


if __name__ == "__main__":
    main()
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_dns_managed_zone +description: +- A zone is a subtree of the DNS namespace under one administrative responsibility. + A ManagedZone is a resource that represents a DNS zone hosted by the Cloud DNS service. +short_description: Creates a GCP ManagedZone +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - A mutable string of at most 1024 characters associated with this resource for + the user's convenience. Has no effect on the managed zone's function. + required: true + type: str + dns_name: + description: + - The DNS name of this managed zone, for instance "example.com.". + required: true + type: str + dnssec_config: + description: + - DNSSEC configuration. + required: false + type: dict + suboptions: + kind: + description: + - Identifies what kind of resource this is. + required: false + default: dns#managedZoneDnsSecConfig + type: str + non_existence: + description: + - Specifies the mechanism used to provide authenticated denial-of-existence + responses. + - non_existence can only be updated when the state is `off`. 
+ - 'Some valid choices include: "nsec", "nsec3"' + required: false + type: str + state: + description: + - Specifies whether DNSSEC is enabled, and what mode it is in. + - 'Some valid choices include: "off", "on", "transfer"' + required: false + type: str + default_key_specs: + description: + - Specifies parameters that will be used for generating initial DnsKeys for + this ManagedZone. If you provide a spec for keySigning or zoneSigning, you + must also provide one for the other. + - default_key_specs can only be updated when the state is `off`. + elements: dict + required: false + type: list + suboptions: + algorithm: + description: + - String mnemonic specifying the DNSSEC algorithm of this key. + - 'Some valid choices include: "ecdsap256sha256", "ecdsap384sha384", "rsasha1", + "rsasha256", "rsasha512"' + required: false + type: str + key_length: + description: + - Length of the keys in bits. + required: false + type: int + key_type: + description: + - Specifies whether this is a key signing key (KSK) or a zone signing + key (ZSK). Key signing keys have the Secure Entry Point flag set and, + when active, will only be used to sign resource record sets of type + DNSKEY. Zone signing keys do not have the Secure Entry Point flag set + and will be used to sign all other types of resource record sets. + - 'Some valid choices include: "keySigning", "zoneSigning"' + required: false + type: str + kind: + description: + - Identifies what kind of resource this is. + required: false + default: dns#dnsKeySpec + type: str + name: + description: + - User assigned name for this resource. + - Must be unique within the project. + required: true + type: str + name_server_set: + description: + - Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet + is a set of DNS name servers that all host the same ManagedZones. Most users + will leave this field unset. 
+ required: false + type: str + labels: + description: + - A set of key/value label pairs to assign to this ManagedZone. + required: false + type: dict + visibility: + description: + - 'The zone''s visibility: public zones are exposed to the Internet, while private + zones are visible only to Virtual Private Cloud resources.' + - 'Some valid choices include: "private", "public"' + required: false + default: public + type: str + private_visibility_config: + description: + - For privately visible zones, the set of Virtual Private Cloud resources that + the zone is visible from. + required: false + type: dict + suboptions: + networks: + description: + - The list of VPC networks that can see this zone. + elements: dict + required: true + type: list + suboptions: + network_url: + description: + - The fully qualified URL of the VPC network to bind to. + - This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`) + . + required: true + type: str + forwarding_config: + description: + - The presence for this field indicates that outbound forwarding is enabled for + this zone. The value of this field contains the set of destinations to forward + to. + required: false + type: dict + suboptions: + target_name_servers: + description: + - List of target name servers to forward to. Cloud DNS will select the best + available name server if more than one target is given. + elements: dict + required: true + type: list + suboptions: + ipv4_address: + description: + - IPv4 address of a target name server. + required: true + type: str + forwarding_path: + description: + - Forwarding path for this TargetNameServer. If unset or `default` Cloud + DNS will make forwarding decision based on address ranges, i.e. RFC1918 + addresses go to the VPC, Non-RFC1918 addresses go to the Internet. When + set to `private`, Cloud DNS will always send queries through VPC for + this target . 
+ - 'Some valid choices include: "default", "private"' + required: false + type: str + peering_config: + description: + - The presence of this field indicates that DNS Peering is enabled for this zone. + The value of this field contains the network to peer with. + required: false + type: dict + suboptions: + target_network: + description: + - The network with which to peer. + required: true + type: dict + suboptions: + network_url: + description: + - The fully qualified URL of the VPC network to forward queries to. + - This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`) + . + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/dns/api/v1/managedZones)' +- 'Managing Zones: U(https://cloud.google.com/dns/zones/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. 
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a managed zone + google.cloud.gcp_dns_managed_zone: + name: test_object + dns_name: test.somewild2.example.com. + description: test zone + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +description: + description: + - A mutable string of at most 1024 characters associated with this resource for + the user's convenience. Has no effect on the managed zone's function. + returned: success + type: str +dnsName: + description: + - The DNS name of this managed zone, for instance "example.com.". + returned: success + type: str +dnssecConfig: + description: + - DNSSEC configuration. + returned: success + type: complex + contains: + kind: + description: + - Identifies what kind of resource this is. + returned: success + type: str + nonExistence: + description: + - Specifies the mechanism used to provide authenticated denial-of-existence + responses. + - non_existence can only be updated when the state is `off`. + returned: success + type: str + state: + description: + - Specifies whether DNSSEC is enabled, and what mode it is in. + returned: success + type: str + defaultKeySpecs: + description: + - Specifies parameters that will be used for generating initial DnsKeys for + this ManagedZone. If you provide a spec for keySigning or zoneSigning, you + must also provide one for the other. 
+ - default_key_specs can only be updated when the state is `off`. + returned: success + type: complex + contains: + algorithm: + description: + - String mnemonic specifying the DNSSEC algorithm of this key. + returned: success + type: str + keyLength: + description: + - Length of the keys in bits. + returned: success + type: int + keyType: + description: + - Specifies whether this is a key signing key (KSK) or a zone signing key + (ZSK). Key signing keys have the Secure Entry Point flag set and, when + active, will only be used to sign resource record sets of type DNSKEY. + Zone signing keys do not have the Secure Entry Point flag set and will + be used to sign all other types of resource record sets. + returned: success + type: str + kind: + description: + - Identifies what kind of resource this is. + returned: success + type: str +id: + description: + - Unique identifier for the resource; defined by the server. + returned: success + type: int +name: + description: + - User assigned name for this resource. + - Must be unique within the project. + returned: success + type: str +nameServers: + description: + - Delegate your managed_zone to these virtual name servers; defined by the server + . + returned: success + type: list +nameServerSet: + description: + - Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is + a set of DNS name servers that all host the same ManagedZones. Most users will + leave this field unset. + returned: success + type: str +creationTime: + description: + - The time that this resource was created on the server. + - This is in RFC3339 text format. + returned: success + type: str +labels: + description: + - A set of key/value label pairs to assign to this ManagedZone. + returned: success + type: dict +visibility: + description: + - 'The zone''s visibility: public zones are exposed to the Internet, while private + zones are visible only to Virtual Private Cloud resources.' 
+ returned: success + type: str +privateVisibilityConfig: + description: + - For privately visible zones, the set of Virtual Private Cloud resources that the + zone is visible from. + returned: success + type: complex + contains: + networks: + description: + - The list of VPC networks that can see this zone. + returned: success + type: complex + contains: + networkUrl: + description: + - The fully qualified URL of the VPC network to bind to. + - This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`) + . + returned: success + type: str +forwardingConfig: + description: + - The presence for this field indicates that outbound forwarding is enabled for + this zone. The value of this field contains the set of destinations to forward + to. + returned: success + type: complex + contains: + targetNameServers: + description: + - List of target name servers to forward to. Cloud DNS will select the best + available name server if more than one target is given. + returned: success + type: complex + contains: + ipv4Address: + description: + - IPv4 address of a target name server. + returned: success + type: str + forwardingPath: + description: + - Forwarding path for this TargetNameServer. If unset or `default` Cloud + DNS will make forwarding decision based on address ranges, i.e. RFC1918 + addresses go to the VPC, Non-RFC1918 addresses go to the Internet. When + set to `private`, Cloud DNS will always send queries through VPC for this + target . + returned: success + type: str +peeringConfig: + description: + - The presence of this field indicates that DNS Peering is enabled for this zone. + The value of this field contains the network to peer with. + returned: success + type: complex + contains: + targetNetwork: + description: + - The network with which to peer. + returned: success + type: complex + contains: + networkUrl: + description: + - The fully qualified URL of the VPC network to forward queries to. 
+        - This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`)
+          .
+        returned: success
+        type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
+    navigate_hash,
+    GcpSession,
+    GcpModule,
+    GcpRequest,
+    remove_nones_from_dict,
+    replace_resource_dict,
+)
+import json
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    """Main function.
+
+    Declarative entry point: parses the module arguments, fetches the
+    current state of the ManagedZone from the Cloud DNS API, then creates,
+    updates, or deletes the remote resource so it matches the requested
+    ``state``. Exits via module.exit_json() with the resource representation
+    plus a 'changed' flag.
+    """
+
+    module = GcpModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            description=dict(required=True, type='str'),
+            dns_name=dict(required=True, type='str'),
+            dnssec_config=dict(
+                type='dict',
+                options=dict(
+                    kind=dict(default='dns#managedZoneDnsSecConfig', type='str'),
+                    non_existence=dict(type='str'),
+                    state=dict(type='str'),
+                    default_key_specs=dict(
+                        type='list',
+                        elements='dict',
+                        options=dict(
+                            algorithm=dict(type='str'), key_length=dict(type='int'), key_type=dict(type='str'), kind=dict(default='dns#dnsKeySpec', type='str')
+                        ),
+                    ),
+                ),
+            ),
+            name=dict(required=True, type='str'),
+            name_server_set=dict(type='str'),
+            labels=dict(type='dict'),
+            visibility=dict(default='public', type='str'),
+            private_visibility_config=dict(
+                type='dict', options=dict(networks=dict(required=True, type='list', elements='dict', options=dict(network_url=dict(required=True, type='str'))))
+            ),
+            forwarding_config=dict(
+                type='dict',
+                options=dict(
+                    target_name_servers=dict(
+                        required=True,
+                        type='list',
+                        elements='dict',
+                        options=dict(ipv4_address=dict(required=True, type='str'), forwarding_path=dict(type='str')),
+                    )
+                ),
+            ),
+            peering_config=dict(
+                type='dict', options=dict(target_network=dict(required=True, type='dict', options=dict(network_url=dict(required=True, type='str'))))
+            ),
+        )
+    )
+
+    # Default to the narrowest OAuth scope that still allows managing zones.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']
+
+    state = module.params['state']
+    kind = 'dns#managedZone'
+
+    # Read current remote state; None means the zone does not exist yet.
+    fetch = fetch_resource(module, self_link(module), kind)
+    changed = False
+
+    if fetch:
+        if state == 'present':
+            # Only issue an update when the desired config actually differs.
+            if is_different(module, fetch):
+                update(module, self_link(module), kind)
+                fetch = fetch_resource(module, self_link(module), kind)
+                changed = True
+        else:
+            delete(module, self_link(module), kind)
+            fetch = {}
+            changed = True
+    else:
+        if state == 'present':
+            fetch = create(module, collection(module), kind)
+            changed = True
+        else:
+            # Absent and already missing: nothing to do.
+            fetch = {}
+
+    fetch.update({'changed': changed})
+
+    module.exit_json(**fetch)
+
+
+def create(module, link, kind):
+    # POST the full resource representation to the collection URL.
+    auth = GcpSession(module, 'dns')
+    return return_if_object(module, auth.post(link, resource_to_request(module)), kind)
+
+
+def update(module, link, kind):
+    # Cloud DNS supports partial updates, hence PATCH on the self link.
+    auth = GcpSession(module, 'dns')
+    return return_if_object(module, auth.patch(link, resource_to_request(module)), kind)
+
+
+def delete(module, link, kind):
+    # DELETE the resource at its self link; body is not needed.
+    auth = GcpSession(module, 'dns')
+    return return_if_object(module, auth.delete(link), kind)
+
+
+def resource_to_request(module):
+    # Map Ansible's snake_case parameters onto the camelCase API request body.
+    request = {
+        u'kind': 'dns#managedZone',
+        u'description': module.params.get('description'),
+        u'dnsName': module.params.get('dns_name'),
+        u'dnssecConfig': ManagedZoneDnssecconfig(module.params.get('dnssec_config', {}), module).to_request(),
+        u'name': module.params.get('name'),
+        u'nameServerSet': module.params.get('name_server_set'),
+        u'labels': module.params.get('labels'),
+        u'visibility': module.params.get('visibility'),
+        u'privateVisibilityConfig': ManagedZonePrivatevisibilityconfig(module.params.get('private_visibility_config', {}), module).to_request(),
+        u'forwardingConfig': ManagedZoneForwardingconfig(module.params.get('forwarding_config', {}), module).to_request(),
+        u'peeringConfig': ManagedZonePeeringconfig(module.params.get('peering_config', {}), module).to_request(),
+    }
+    return_vals = {}
+    for k, v in request.items():
+        # Drop unset/empty values but keep an explicit boolean False.
+        if v or v is False:
+            return_vals[k] = v
+
+    return return_vals
+
+
+def fetch_resource(module, link, kind, allow_not_found=True):
+    # GET the resource; returns None on 404 when allow_not_found is set.
+    auth = GcpSession(module, 'dns')
+    return return_if_object(module, auth.get(link), kind, allow_not_found)
+
+
+def self_link(module):
+    # URL of this specific managed zone (used for get/patch/delete).
+    return "https://dns.googleapis.com/dns/v1/projects/{project}/managedZones/{name}".format(**module.params)
+
+
+def collection(module):
+    # URL of the managed-zone collection (used for create/list).
+    return "https://dns.googleapis.com/dns/v1/projects/{project}/managedZones".format(**module.params)
+
+
+def return_if_object(module, response, kind, allow_not_found=False):
+    # If not found, return nothing.
+    if allow_not_found and response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError):
+        # json.decoder.JSONDecodeError exists on Python 3 only; fall back to
+        # ValueError (its base class) so Python 2 is still handled.
+        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
+
+    # Surface API-level errors embedded in an otherwise successful response.
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+def is_different(module, response):
+    # Compare only the keys both sides share: the request lacks output-only
+    # fields and the response lacks optional fields the user never set.
+    request = resource_to_request(module)
+    response = response_to_hash(module, response)
+
+    # Remove all output-only from response.
+    response_vals = {}
+    for k, v in response.items():
+        if k in request:
+            response_vals[k] = v
+
+    request_vals = {}
+    for k, v in request.items():
+        if k in response:
+            request_vals[k] = v
+
+    return GcpRequest(request_vals) != GcpRequest(response_vals)
+
+
+# Remove unnecessary properties from the response.
+# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response):
+    # Normalize the API response into the same shape as resource_to_request()
+    # so the two dicts can be compared field-by-field in is_different().
+    # User-controlled immutable fields are taken from module.params directly.
+    return {
+        u'description': response.get(u'description'),
+        u'dnsName': module.params.get('dns_name'),
+        u'dnssecConfig': ManagedZoneDnssecconfig(response.get(u'dnssecConfig', {}), module).from_response(),
+        u'id': response.get(u'id'),
+        u'name': module.params.get('name'),
+        u'nameServers': response.get(u'nameServers'),
+        u'nameServerSet': module.params.get('name_server_set'),
+        u'creationTime': response.get(u'creationTime'),
+        u'labels': response.get(u'labels'),
+        u'visibility': module.params.get('visibility'),
+        u'privateVisibilityConfig': ManagedZonePrivatevisibilityconfig(response.get(u'privateVisibilityConfig', {}), module).from_response(),
+        u'forwardingConfig': ManagedZoneForwardingconfig(response.get(u'forwardingConfig', {}), module).from_response(),
+        u'peeringConfig': ManagedZonePeeringconfig(response.get(u'peeringConfig', {}), module).from_response(),
+    }
+
+
+class ManagedZoneDnssecconfig(object):
+    """Maps the dnssec_config sub-object between Ansible parameters
+    (snake_case, to_request) and API responses (camelCase, from_response)."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            # Tolerate None so params/response values can be passed directly.
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict(
+            {
+                u'kind': self.request.get('kind'),
+                u'nonExistence': self.request.get('non_existence'),
+                u'state': self.request.get('state'),
+                u'defaultKeySpecs': ManagedZoneDefaultkeyspecsArray(self.request.get('default_key_specs', []), self.module).to_request(),
+            }
+        )
+
+    def from_response(self):
+        return remove_nones_from_dict(
+            {
+                u'kind': self.request.get(u'kind'),
+                u'nonExistence': self.request.get(u'nonExistence'),
+                u'state': self.request.get(u'state'),
+                u'defaultKeySpecs': ManagedZoneDefaultkeyspecsArray(self.request.get(u'defaultKeySpecs', []), self.module).from_response(),
+            }
+        )
+
+
+class ManagedZoneDefaultkeyspecsArray(object):
+    """Maps each item of the default_key_specs list between Ansible
+    parameters and the API's defaultKeySpecs representation."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = []
+
+    def to_request(self):
+        items = []
+        for item in self.request:
+            items.append(self._request_for_item(item))
+        return items
+
+    def from_response(self):
+        items = []
+        for item in self.request:
+            items.append(self._response_from_item(item))
+        return items
+
+    def _request_for_item(self, item):
+        # snake_case param keys -> camelCase API keys.
+        return remove_nones_from_dict(
+            {u'algorithm': item.get('algorithm'), u'keyLength': item.get('key_length'), u'keyType': item.get('key_type'), u'kind': item.get('kind')}
+        )
+
+    def _response_from_item(self, item):
+        # Response keys are already camelCase; just drop Nones.
+        return remove_nones_from_dict(
+            {u'algorithm': item.get(u'algorithm'), u'keyLength': item.get(u'keyLength'), u'keyType': item.get(u'keyType'), u'kind': item.get(u'kind')}
+        )
+
+
+class ManagedZonePrivatevisibilityconfig(object):
+    """Maps the private_visibility_config sub-object between Ansible
+    parameters and the API's privateVisibilityConfig representation."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict({u'networks': ManagedZoneNetworksArray(self.request.get('networks', []), self.module).to_request()})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'networks': ManagedZoneNetworksArray(self.request.get(u'networks', []), self.module).from_response()})
+
+
+class ManagedZoneNetworksArray(object):
+    """Maps each item of the networks list (VPCs that can see the zone)."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = []
+
+    def to_request(self):
+        items = []
+        for item in self.request:
+            items.append(self._request_for_item(item))
+        return items
+
+    def from_response(self):
+        items = []
+        for item in self.request:
+            items.append(self._response_from_item(item))
+        return items
+
+    def _request_for_item(self, item):
+        return remove_nones_from_dict({u'networkUrl': item.get('network_url')})
+
+    def _response_from_item(self, item):
+        return remove_nones_from_dict({u'networkUrl': item.get(u'networkUrl')})
+
+
+class ManagedZoneForwardingconfig(object):
+    """Maps the forwarding_config sub-object between Ansible parameters
+    and the API's forwardingConfig representation."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict(
+            {u'targetNameServers': ManagedZoneTargetnameserversArray(self.request.get('target_name_servers', []), self.module).to_request()}
+        )
+
+    def from_response(self):
+        return remove_nones_from_dict(
+            {u'targetNameServers': ManagedZoneTargetnameserversArray(self.request.get(u'targetNameServers', []), self.module).from_response()}
+        )
+
+
+class ManagedZoneTargetnameserversArray(object):
+    """Maps each item of the target_name_servers list (forwarding targets)."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = []
+
+    def to_request(self):
+        items = []
+        for item in self.request:
+            items.append(self._request_for_item(item))
+        return items
+
+    def from_response(self):
+        items = []
+        for item in self.request:
+            items.append(self._response_from_item(item))
+        return items
+
+    def _request_for_item(self, item):
+        return remove_nones_from_dict({u'ipv4Address': item.get('ipv4_address'), u'forwardingPath': item.get('forwarding_path')})
+
+    def _response_from_item(self, item):
+        return remove_nones_from_dict({u'ipv4Address': item.get(u'ipv4Address'), u'forwardingPath': item.get(u'forwardingPath')})
+
+
+class ManagedZonePeeringconfig(object):
+    """Maps the peering_config sub-object between Ansible parameters
+    and the API's peeringConfig representation."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict({u'targetNetwork': ManagedZoneTargetnetwork(self.request.get('target_network', {}), self.module).to_request()})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'targetNetwork': ManagedZoneTargetnetwork(self.request.get(u'targetNetwork', {}), self.module).from_response()})
+
+
+class ManagedZoneTargetnetwork(object):
+    """Maps the target_network sub-object of peering_config."""
+
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict({u'networkUrl': self.request.get('network_url')})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'networkUrl': self.request.get(u'networkUrl')})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_dns_managed_zone_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_dns_managed_zone_info.py
new file mode 100644
index 000000000..c0905abe8
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_dns_managed_zone_info.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_dns_managed_zone_info
+description:
+- Gather info for GCP ManagedZone
+short_description: Gather info for GCP ManagedZone
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  dns_name:
+    description:
+    - Restricts the list to return only zones with this domain name.
+ type: list + elements: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a managed zone + gcp_dns_managed_zone_info: + dns_name: test.somewild2.example.com. 
+ project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + description: + description: + - A mutable string of at most 1024 characters associated with this resource + for the user's convenience. Has no effect on the managed zone's function. + returned: success + type: str + dnsName: + description: + - The DNS name of this managed zone, for instance "example.com.". + returned: success + type: str + dnssecConfig: + description: + - DNSSEC configuration. + returned: success + type: complex + contains: + kind: + description: + - Identifies what kind of resource this is. + returned: success + type: str + nonExistence: + description: + - Specifies the mechanism used to provide authenticated denial-of-existence + responses. + - non_existence can only be updated when the state is `off`. + returned: success + type: str + state: + description: + - Specifies whether DNSSEC is enabled, and what mode it is in. + returned: success + type: str + defaultKeySpecs: + description: + - Specifies parameters that will be used for generating initial DnsKeys + for this ManagedZone. If you provide a spec for keySigning or zoneSigning, + you must also provide one for the other. + - default_key_specs can only be updated when the state is `off`. + returned: success + type: complex + contains: + algorithm: + description: + - String mnemonic specifying the DNSSEC algorithm of this key. + returned: success + type: str + keyLength: + description: + - Length of the keys in bits. + returned: success + type: int + keyType: + description: + - Specifies whether this is a key signing key (KSK) or a zone signing + key (ZSK). Key signing keys have the Secure Entry Point flag set and, + when active, will only be used to sign resource record sets of type + DNSKEY. 
Zone signing keys do not have the Secure Entry Point flag + set and will be used to sign all other types of resource record sets. + returned: success + type: str + kind: + description: + - Identifies what kind of resource this is. + returned: success + type: str + id: + description: + - Unique identifier for the resource; defined by the server. + returned: success + type: int + name: + description: + - User assigned name for this resource. + - Must be unique within the project. + returned: success + type: str + nameServers: + description: + - Delegate your managed_zone to these virtual name servers; defined by the server + . + returned: success + type: list + nameServerSet: + description: + - Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet + is a set of DNS name servers that all host the same ManagedZones. Most users + will leave this field unset. + returned: success + type: str + creationTime: + description: + - The time that this resource was created on the server. + - This is in RFC3339 text format. + returned: success + type: str + labels: + description: + - A set of key/value label pairs to assign to this ManagedZone. + returned: success + type: dict + visibility: + description: + - 'The zone''s visibility: public zones are exposed to the Internet, while private + zones are visible only to Virtual Private Cloud resources.' + returned: success + type: str + privateVisibilityConfig: + description: + - For privately visible zones, the set of Virtual Private Cloud resources that + the zone is visible from. + returned: success + type: complex + contains: + networks: + description: + - The list of VPC networks that can see this zone. + returned: success + type: complex + contains: + networkUrl: + description: + - The fully qualified URL of the VPC network to bind to. + - This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`) + . 
+ returned: success + type: str + forwardingConfig: + description: + - The presence for this field indicates that outbound forwarding is enabled + for this zone. The value of this field contains the set of destinations to + forward to. + returned: success + type: complex + contains: + targetNameServers: + description: + - List of target name servers to forward to. Cloud DNS will select the best + available name server if more than one target is given. + returned: success + type: complex + contains: + ipv4Address: + description: + - IPv4 address of a target name server. + returned: success + type: str + forwardingPath: + description: + - Forwarding path for this TargetNameServer. If unset or `default` Cloud + DNS will make forwarding decision based on address ranges, i.e. RFC1918 + addresses go to the VPC, Non-RFC1918 addresses go to the Internet. + When set to `private`, Cloud DNS will always send queries through + VPC for this target . + returned: success + type: str + peeringConfig: + description: + - The presence of this field indicates that DNS Peering is enabled for this + zone. The value of this field contains the network to peer with. + returned: success + type: complex + contains: + targetNetwork: + description: + - The network with which to peer. + returned: success + type: complex + contains: + networkUrl: + description: + - The fully qualified URL of the VPC network to forward queries to. + - This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`) + . 
+              returned: success
+              type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
+import json
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    # Info module: lists ManagedZones; the only filter is dns_name.
+    module = GcpModule(argument_spec=dict(dns_name=dict(type='list', elements='str')))
+
+    # Default to the narrowest OAuth scope that still allows reading zones.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']
+
+    return_value = {'resources': fetch_list(module, collection(module), module.params['dns_name'])}
+    module.exit_json(**return_value)
+
+
+def collection(module):
+    # URL of the managed-zone collection for the configured project.
+    return "https://dns.googleapis.com/dns/v1/projects/{project}/managedZones".format(**module.params)
+
+
+def fetch_list(module, link, query):
+    # Pages through the list API; items live under the 'managedZones' key,
+    # and the dnsName query parameter restricts the results server-side.
+    auth = GcpSession(module, 'dns')
+    return auth.list(link, return_if_object, array_name='managedZones', params={'dnsName': query})
+
+
+def return_if_object(module, response):
+    # If not found, return nothing.
+    if response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        # json.decoder.JSONDecodeError exists on Python 3 only; fall back to
+        # ValueError (its base class) so Python 2 is still handled.
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    # Surface API-level errors embedded in an otherwise successful response.
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_dns_resource_record_set.py b/ansible_collections/google/cloud/plugins/modules/gcp_dns_resource_record_set.py
new file mode 100644
index 000000000..cfa2058a9
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_dns_resource_record_set.py
@@ -0,0 +1,498 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_dns_resource_record_set +description: +- A single DNS record that exists on a domain name (i.e. in a managed zone). +- This record defines the information about the domain and where the domain / subdomains + direct to. +- The record will include the domain/subdomain name, a type (i.e. A, AAA, CAA, MX, + CNAME, NS, etc) . +short_description: Creates a GCP ResourceRecordSet +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - For example, U(www.example.com). + required: true + type: str + type: + description: + - One of valid DNS resource types. + - 'Some valid choices include: "A", "AAAA", "CAA", "CNAME", "DNSKEY", "DS", "IPSECVPNKEY", + "MX", "NAPTR", "NS", "PTR", "SOA", "SPF", "SRV", "SSHFP", "TLSA", "TXT"' + required: true + type: str + ttl: + description: + - Number of seconds that this ResourceRecordSet can be cached by resolvers. + required: false + type: int + target: + description: + - As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) . + elements: str + required: false + type: list + managed_zone: + description: + - Identifies the managed zone addressed by this request. 
This must be a dictionary + that contains both a 'name' key and a 'dnsName' key. You can pass in the results + of the gcp_dns_managed_zone module, which will contain both. + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a managed zone + google.cloud.gcp_dns_managed_zone: + name: managedzone-rrs + dns_name: testzone-4.com. + description: test zone + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: managed_zone + +- name: create a resource record set + google.cloud.gcp_dns_resource_record_set: + name: www.testzone-4.com. + managed_zone: "{{ managed_zone }}" + type: A + ttl: 600 + target: + - 10.1.2.3 + - 40.5.6.7 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - For example, U(www.example.com). 
+ returned: success + type: str +type: + description: + - One of valid DNS resource types. + returned: success + type: str +ttl: + description: + - Number of seconds that this ResourceRecordSet can be cached by resolvers. + returned: success + type: int +target: + description: + - As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) . + returned: success + type: list +managed_zone: + description: + - Identifies the managed zone addressed by this request. This must be a dictionary + that contains both a 'name' key and a 'dnsName' key. You can pass in the results + of the gcp_dns_managed_zone module, which will contain both. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import copy +import datetime +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + type=dict(required=True, type='str'), + ttl=dict(type='int'), + target=dict(type='list', elements='str'), + managed_zone=dict(required=True, type='dict'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite'] + + state = module.params['state'] + kind = 'dns#resourceRecordSet' + + fetch = fetch_wrapped_resource(module, 'dns#resourceRecordSet', 'dns#resourceRecordSetsListResponse', 'rrsets') + changed = False + + if 'dnsName' not in module.params.get('managed_zone') or 'name' 
not in module.params.get('managed_zone'): + module.fail_json(msg="managed_zone dictionary must contain both the name of the zone and the dns name of the zone") + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind, fetch) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + change = create_change(None, updated_record(module), module) + change_id = int(change['id']) + if change['status'] == 'pending': + wait_for_change_to_complete(change_id, module) + return fetch_wrapped_resource(module, 'dns#resourceRecordSet', 'dns#resourceRecordSetsListResponse', 'rrsets') + + +def update(module, link, kind, fetch): + change = create_change(fetch, updated_record(module), module) + change_id = int(change['id']) + if change['status'] == 'pending': + wait_for_change_to_complete(change_id, module) + return fetch_wrapped_resource(module, 'dns#resourceRecordSet', 'dns#resourceRecordSetsListResponse', 'rrsets') + + +def delete(module, link, kind, fetch): + change = create_change(fetch, None, module) + change_id = int(change['id']) + if change['status'] == 'pending': + wait_for_change_to_complete(change_id, module) + return fetch_wrapped_resource(module, 'dns#resourceRecordSet', 'dns#resourceRecordSetsListResponse', 'rrsets') + + +def resource_to_request(module): + request = { + u'kind': 'dns#resourceRecordSet', + u'name': module.params.get('name'), + u'type': module.params.get('type'), + u'ttl': module.params.get('ttl'), + u'rrdatas': module.params.get('target'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def 
fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'dns') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def fetch_wrapped_resource(module, kind, wrap_kind, wrap_path): + result = fetch_resource(module, self_link(module), wrap_kind) + if result is None or wrap_path not in result: + return None + + result = unwrap_resource(result[wrap_path], module) + + if result is None: + return None + + if result['kind'] != kind: + module.fail_json(msg="Incorrect result: {kind}".format(**result)) + + return result + + +def self_link(module): + res = { + 'project': module.params['project'], + 'managed_zone': replace_resource_dict(module.params['managed_zone'], 'name'), + 'name': module.params['name'], + 'type': module.params['type'], + } + return "https://dns.googleapis.com/dns/v1/projects/{project}/managedZones/{managed_zone}/rrsets?name={name}&type={type}".format(**res) + + +def collection(module): + res = {'project': module.params['project'], 'managed_zone': replace_resource_dict(module.params['managed_zone'], 'name')} + return "https://dns.googleapis.com/dns/v1/projects/{project}/managedZones/{managed_zone}/changes".format(**res) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return {u'name': response.get(u'name'), u'type': response.get(u'type'), u'ttl': response.get(u'ttl'), u'rrdatas': response.get(u'rrdatas')} + + +def updated_record(module): + return { + 'kind': 'dns#resourceRecordSet', + 'name': module.params['name'], + 'type': module.params['type'], + 'ttl': module.params['ttl'] if module.params['ttl'] else 900, + 'rrdatas': module.params['target'], + } + + +def unwrap_resource(result, module): + if not result: + return None + return result[0] + + +class SOAForwardable(object): + def __init__(self, params, module): + self.params = params + self.module = module + + def fail_json(self, *args, **kwargs): + self.module.fail_json(*args, **kwargs) + + def raise_for_status(self, *args, **kwargs): + self.module.raise_for_status(*args, **kwargs) + + +def prefetch_soa_resource(module): + + resource = SOAForwardable( + { + 'type': 'SOA', + 'managed_zone': module.params['managed_zone'], + 'name': replace_resource_dict(module.params['managed_zone'], 'dnsName'), + 'project': module.params['project'], + 'scopes': module.params['scopes'], + 'service_account_file': module.params.get('service_account_file'), + 'auth_kind': module.params['auth_kind'], + 'service_account_email': module.params.get('service_account_email'), + 'service_account_contents': module.params.get('service_account_contents'), + }, + module, + ) + + result = fetch_wrapped_resource(resource, 'dns#resourceRecordSet', 'dns#resourceRecordSetsListResponse', 'rrsets') + if not result: + raise ValueError("Google DNS Managed Zone %s not found" % 
replace_resource_dict(module.params['managed_zone'], 'name')) + return result + + +def create_change(original, updated, module): + auth = GcpSession(module, 'dns') + return return_if_change_object(module, auth.post(collection(module), resource_to_change_request(original, updated, module))) + + +# Fetch current SOA. We need the last SOA so we can increment its serial +def update_soa(module): + original_soa = prefetch_soa_resource(module) + + # Create a clone of the SOA record so we can update it + updated_soa = copy.deepcopy(original_soa) + + soa_parts = updated_soa['rrdatas'][0].split(' ') + soa_parts[2] = str(int(soa_parts[2]) + 1) + updated_soa['rrdatas'][0] = ' '.join(soa_parts) + return [original_soa, updated_soa] + + +def resource_to_change_request(original_record, updated_record, module): + original_soa, updated_soa = update_soa(module) + result = new_change_request() + add_additions(result, updated_soa, updated_record) + add_deletions(result, original_soa, original_record) + return result + + +def add_additions(result, updated_soa, updated_record): + if updated_soa: + result['additions'].append(updated_soa) + if updated_record: + result['additions'].append(updated_record) + + +def add_deletions(result, original_soa, original_record): + if original_soa: + result['deletions'].append(original_soa) + + if original_record: + result['deletions'].append(original_record) + + +# TODO(nelsonjr): Merge and delete this code once async operation +# declared in api.yaml +def wait_for_change_to_complete(change_id, module): + status = 'pending' + while status == 'pending': + status = get_change_status(change_id, module) + if status != 'done': + time.sleep(0.5) + + +def get_change_status(change_id, module): + auth = GcpSession(module, 'dns') + link = collection(module) + "/%s" % change_id + return return_if_change_object(module, auth.get(link))['status'] + + +def new_change_request(): + return {'kind': 'dns#change', 'additions': [], 'deletions': [], 'start_time': 
datetime.datetime.now().isoformat()} + + +def return_if_change_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + if response.status_code == 204: + return None + + try: + response.raise_for_status() + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if result['kind'] != 'dns#change': + module.fail_json(msg="Invalid result: %s" % result['kind']) + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_dns_resource_record_set_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_dns_resource_record_set_info.py new file mode 100644 index 000000000..5fafd645f --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_dns_resource_record_set_info.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_dns_resource_record_set_info +description: +- Gather info for GCP ResourceRecordSet +short_description: Gather info for GCP ResourceRecordSet +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + managed_zone: + description: + - Identifies the managed zone addressed by this request. This must be a dictionary + that contains both a 'name' key and a 'dnsName' key. You can pass in the results + of the gcp_dns_managed_zone module, which will contain both. + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a resource record set + gcp_dns_resource_record_set_info: + managed_zone: "{{ managed_zone }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - For example, U(www.example.com). + returned: success + type: str + type: + description: + - One of valid DNS resource types. + returned: success + type: str + ttl: + description: + - Number of seconds that this ResourceRecordSet can be cached by resolvers. + returned: success + type: int + target: + description: + - As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) . + returned: success + type: list + managed_zone: + description: + - Identifies the managed zone addressed by this request. This must be a dictionary + that contains both a 'name' key and a 'dnsName' key. 
You can pass in the results + of the gcp_dns_managed_zone module, which will contain both. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(managed_zone=dict(required=True, type='dict'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + res = {'project': module.params['project'], 'managed_zone': replace_resource_dict(module.params['managed_zone'], 'name')} + return "https://dns.googleapis.com/dns/v1/projects/{project}/managedZones/{managed_zone}/rrsets".format(**res) + + +def fetch_list(module, link): + auth = GcpSession(module, 'dns') + return auth.list(link, return_if_object, array_name='rrsets') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_filestore_instance.py b/ansible_collections/google/cloud/plugins/modules/gcp_filestore_instance.py new file mode 100644 index 000000000..7028ffeb4 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_filestore_instance.py @@ -0,0 +1,576 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_filestore_instance +description: +- A Google Cloud Filestore instance. 
+short_description: Creates a GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The resource name of the instance. + required: true + type: str + description: + description: + - A description of the instance. + required: false + type: str + tier: + description: + - The service tier of the instance. + - 'Some valid choices include: "TIER_UNSPECIFIED", "STANDARD", "PREMIUM", "BASIC_HDD", + "BASIC_SSD", "HIGH_SCALE_SSD"' + required: true + type: str + labels: + description: + - Resource labels to represent user-provided metadata. + required: false + type: dict + file_shares: + description: + - File system shares on the instance. For this version, only a single file share + is supported. + elements: dict + required: true + type: list + suboptions: + name: + description: + - The name of the fileshare (16 characters or less) . + required: true + type: str + capacity_gb: + description: + - File share capacity in GiB. This must be at least 1024 GiB for the standard + tier, or 2560 GiB for the premium tier. + required: true + type: int + networks: + description: + - VPC networks to which the instance is connected. For this version, only a single + network is supported. + elements: dict + required: true + type: list + suboptions: + network: + description: + - The name of the GCE VPC network to which the instance is connected. + required: true + type: str + modes: + description: + - IP versions for which the instance has IP addresses assigned. + elements: str + required: true + type: list + reserved_ip_range: + description: + - A /29 CIDR block that identifies the range of IP addresses reserved for + this instance. + required: false + type: str + zone: + description: + - The name of the Filestore zone of the instance. 
+ required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/filestore/docs/reference/rest/v1beta1/projects.locations.instances/create)' +- 'Official Documentation: U(https://cloud.google.com/filestore/docs/creating-instances)' +- 'Use with Kubernetes: U(https://cloud.google.com/filestore/docs/csi-driver)' +- 'Copying Data In/Out: U(https://cloud.google.com/filestore/docs/copying-data)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. 
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: create an instance
+ returned: success + type: list + reservedIpRange: + description: + - A /29 CIDR block that identifies the range of IP addresses reserved for this + instance. + returned: success + type: str + ipAddresses: + description: + - A list of IPv4 or IPv6 addresses. + returned: success + type: list +etag: + description: + - Server-specified ETag for the instance resource to prevent simultaneous updates + from overwriting each other. + returned: success + type: str +zone: + description: + - The name of the Filestore zone of the instance. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import re +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + tier=dict(required=True, type='str'), + labels=dict(type='dict'), + file_shares=dict( + required=True, type='list', elements='dict', options=dict(name=dict(required=True, type='str'), capacity_gb=dict(required=True, type='int')) + ), + networks=dict( + required=True, + type='list', + elements='dict', + options=dict( + network=dict(required=True, type='str'), modes=dict(required=True, type='list', elements='str'), reserved_ip_range=dict(type='str') + ), + ), + zone=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = 
['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, create_link(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'filestore') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, fetch): + auth = GcpSession(module, 'filestore') + params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))} + request = resource_to_request(module) + return wait_for_operation(module, auth.patch(link, request, params=params)) + + +def updateMask(request, response): + update_mask = [] + if request.get('description') != response.get('description'): + update_mask.append('description') + if request.get('labels') != response.get('labels'): + update_mask.append('labels') + if request.get('fileShares') != response.get('fileShares'): + update_mask.append('fileShares') + return ','.join(update_mask) + + +def delete(module, link): + auth = GcpSession(module, 'filestore') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'description': module.params.get('description'), + u'tier': module.params.get('tier'), + u'labels': module.params.get('labels'), + u'fileShares': InstanceFilesharesArray(module.params.get('file_shares', []), module).to_request(), + u'networks': InstanceNetworksArray(module.params.get('networks', []), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v 
is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'filestore') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://file.googleapis.com/v1/projects/{project}/locations/{zone}/instances/{name}".format(**module.params) + + +def collection(module): + return "https://file.googleapis.com/v1/projects/{project}/locations/{zone}/instances".format(**module.params) + + +def create_link(module): + return "https://file.googleapis.com/v1/projects/{project}/locations/{zone}/instances?instanceId={name}".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'name': response.get(u'name'), + u'description': response.get(u'description'), + u'createTime': response.get(u'createTime'), + u'tier': module.params.get('tier'), + u'labels': response.get(u'labels'), + u'fileShares': InstanceFilesharesArray(response.get(u'fileShares', []), module).from_response(), + u'networks': InstanceNetworksArray(module.params.get('networks', []), module).to_request(), + u'etag': response.get(u'etag'), + } + + +def name_pattern(name, module): + if name is None: + return + + regex = r"projects/.*/locations/.*/instances/.*" + + if not re.match(regex, name): + name = "projects/{project}/locations/{zone}/instances/{name}".format(**module.params) + + return name + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://file.googleapis.com/v1/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response) + if op_result is None: + return {} + status = navigate_hash(op_result, ['done']) + wait_done = wait_for_completion(status, op_result, module) + raise_if_errors(wait_done, ['error'], module) + return navigate_hash(wait_done, ['response']) + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while not status: + raise_if_errors(op_result, ['error'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, False) + status = navigate_hash(op_result, ['done']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class InstanceFilesharesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + 
+ def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'name': item.get('name'), u'capacityGb': item.get('capacity_gb')}) + + def _response_from_item(self, item): + return remove_nones_from_dict({u'name': item.get(u'name'), u'capacityGb': item.get(u'capacityGb')}) + + +class InstanceNetworksArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'network': item.get('network'), u'modes': item.get('modes'), u'reservedIpRange': item.get('reserved_ip_range')}) + + def _response_from_item(self, item): + return remove_nones_from_dict( + {u'network': self.module.params.get('network'), u'modes': self.module.params.get('modes'), u'reservedIpRange': item.get(u'reservedIpRange')} + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_filestore_instance_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_filestore_instance_info.py new file mode 100644 index 000000000..713fcc6f9 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_filestore_instance_info.py @@ -0,0 +1,249 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_filestore_instance_info +description: +- Gather info for GCP Instance +short_description: Gather info for GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + zone: + description: + - The name of the Filestore zone of the instance. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance + gcp_filestore_instance_info: + zone: us-central1-b + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The resource name of the instance. + returned: success + type: str + description: + description: + - A description of the instance. + returned: success + type: str + createTime: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + tier: + description: + - The service tier of the instance. 
+ returned: success + type: str + labels: + description: + - Resource labels to represent user-provided metadata. + returned: success + type: dict + fileShares: + description: + - File system shares on the instance. For this version, only a single file share + is supported. + returned: success + type: complex + contains: + name: + description: + - The name of the fileshare (16 characters or less) . + returned: success + type: str + capacityGb: + description: + - File share capacity in GiB. This must be at least 1024 GiB for the standard + tier, or 2560 GiB for the premium tier. + returned: success + type: int + networks: + description: + - VPC networks to which the instance is connected. For this version, only a + single network is supported. + returned: success + type: complex + contains: + network: + description: + - The name of the GCE VPC network to which the instance is connected. + returned: success + type: str + modes: + description: + - IP versions for which the instance has IP addresses assigned. + returned: success + type: list + reservedIpRange: + description: + - A /29 CIDR block that identifies the range of IP addresses reserved for + this instance. + returned: success + type: str + ipAddresses: + description: + - A list of IPv4 or IPv6 addresses. + returned: success + type: list + etag: + description: + - Server-specified ETag for the instance resource to prevent simultaneous updates + from overwriting each other. + returned: success + type: str + zone: + description: + - The name of the Filestore zone of the instance. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://file.googleapis.com/v1/projects/{project}/locations/{zone}/instances".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'filestore') + return auth.list(link, return_if_object, array_name='instances') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_iam_role.py b/ansible_collections/google/cloud/plugins/modules/gcp_iam_role.py new file mode 100644 index 000000000..a87aa58f2 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_iam_role.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + +DOCUMENTATION = """ +--- +module: gcp_iam_role +description: +- A role in the Identity and Access Management API . 
+short_description: Creates a GCP Role +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The name of the role. + required: true + type: str + title: + description: + - A human-readable title for the role. Typically this is limited to 100 UTF-8 + bytes. + required: false + type: str + description: + description: + - Human-readable description for the role. + required: false + type: str + included_permissions: + description: + - Names of permissions this role grants when bound in an IAM policy. + elements: str + required: false + type: list + stage: + description: + - The current launch stage of the role. + - 'Some valid choices include: "ALPHA", "BETA", "GA", "DEPRECATED", "DISABLED", + "EAP"' + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. 
+ - This only alters the User Agent string for any API requests. + type: str +""" + +EXAMPLES = """ +- name: create a role + google.cloud.gcp_iam_role: + name: myCustomRole2 + title: My Custom Role + description: My custom role description + included_permissions: + - iam.roles.list + - iam.roles.create + - iam.roles.delete + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +""" + +RETURN = """ +name: + description: + - The name of the role. + returned: success + type: str +title: + description: + - A human-readable title for the role. Typically this is limited to 100 UTF-8 bytes. + returned: success + type: str +description: + description: + - Human-readable description for the role. + returned: success + type: str +includedPermissions: + description: + - Names of permissions this role grants when bound in an IAM policy. + returned: success + type: list +stage: + description: + - The current launch stage of the role. + returned: success + type: str +deleted: + description: + - The current deleted state of the role. 
+ returned: success + type: bool +""" + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + replace_resource_dict, +) +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "absent"], type="str"), + name=dict(required=True, type="str"), + title=dict(type="str"), + description=dict(type="str"), + included_permissions=dict(type="list", elements="str"), + stage=dict(type="str"), + ) + ) + + if not module.params["scopes"]: + module.params["scopes"] = ["https://www.googleapis.com/auth/iam"] + + state = module.params["state"] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == "present": + if fetch.get("deleted"): + undelete(module, self_link(module), fetch["etag"]) + changed = True + elif is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, self_link(module)) + changed = True + elif not fetch.get("deleted"): + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == "present": + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({"changed": changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, "iam") + return return_if_object(module, auth.post(link, resource_to_create(module))) + + +def undelete(module, link, etag): + auth = GcpSession(module, "iam") + return return_if_object(module, auth.post(link + ":undelete", { + "etag": etag 
+ })) + + +def update(module, link, fetch): + auth = GcpSession(module, "iam") + params = { + "updateMask": updateMask( + resource_to_request(module), response_to_hash(module, fetch) + ) + } + request = resource_to_request(module) + del request["name"] + return return_if_object(module, auth.patch(link, request, params=params)) + + +def updateMask(request, response): + update_mask = [] + if request.get("name") != response.get("name"): + update_mask.append("name") + if request.get("title") != response.get("title"): + update_mask.append("title") + if request.get("description") != response.get("description"): + update_mask.append("description") + if request.get("includedPermissions") != response.get("includedPermissions"): + update_mask.append("includedPermissions") + if request.get("stage") != response.get("stage"): + update_mask.append("stage") + return ",".join(update_mask) + + +def delete(module, link): + auth = GcpSession(module, "iam") + return return_if_object(module, auth.delete(link), allow_not_found=True) + + +def resource_to_request(module): + request = { + "name": module.params.get("name"), + "title": module.params.get("title"), + "description": module.params.get("description"), + "includedPermissions": module.params.get("included_permissions"), + "stage": module.params.get("stage"), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, "iam") + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://iam.googleapis.com/v1/projects/{project}/roles/{name}".format( + **module.params + ) + + +def collection(module): + return "https://iam.googleapis.com/v1/projects/{project}/roles".format( + **module.params + ) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. 
+    if allow_not_found and response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    # catches an edge case specific to IAM roles where the role not
+    # existing returns 400.
+    if (allow_not_found and response.status_code == 400
+        and "You can't delete role_id" in response.text):
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, "JSONDecodeError", ValueError):
+        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
+
+    result = decode_response(result, module)
+
+    if navigate_hash(result, ["error", "errors"]):
+        module.fail_json(msg=navigate_hash(result, ["error", "errors"]))
+
+    return result
+
+
+def is_different(module, response):
+    request = resource_to_request(module)
+    response = response_to_hash(module, response)
+    request = decode_response(request, module)
+
+    # Remove all output-only from response.
+    response_vals = {}
+    for k, v in response.items():
+        if k in request:
+            response_vals[k] = v
+
+    request_vals = {}
+    for k, v in request.items():
+        if k in response:
+            request_vals[k] = v
+
+    return GcpRequest(request_vals) != GcpRequest(response_vals)
+
+
+# Remove unnecessary properties from the response.
+# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response): + return { + "name": response.get("name"), + "title": response.get("title"), + "description": response.get("description"), + "includedPermissions": response.get("includedPermissions"), + "stage": response.get("stage"), + "deleted": response.get("deleted"), + } + + +def resource_to_create(module): + role = resource_to_request(module) + del role["name"] + return {"roleId": module.params["name"], "role": role} + + +def decode_response(response, module): + if "name" in response: + response["name"] = response["name"].split("/")[-1] + return response + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_iam_role_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_iam_role_info.py new file mode 100644 index 000000000..de791b251 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_iam_role_info.py @@ -0,0 +1,192 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_iam_role_info +description: +- Gather info for GCP Role +short_description: Gather info for GCP Role +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a role + gcp_iam_role_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of the role. + returned: success + type: str + title: + description: + - A human-readable title for the role. Typically this is limited to 100 UTF-8 + bytes. + returned: success + type: str + description: + description: + - Human-readable description for the role. + returned: success + type: str + includedPermissions: + description: + - Names of permissions this role grants when bound in an IAM policy. + returned: success + type: list + stage: + description: + - The current launch stage of the role. + returned: success + type: str + deleted: + description: + - The current deleted state of the role. 
+ returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/iam'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://iam.googleapis.com/v1/projects/{project}/roles".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'iam') + return auth.list(link, return_if_object, array_name='roles') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account.py b/ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account.py new file mode 100644 index 000000000..fa930145a --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account.py @@ -0,0 +1,303 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_iam_service_account +description: +- A service account in the Identity and Access Management API. 
+short_description: Creates a GCP ServiceAccount +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The name of the service account. + required: false + type: str + display_name: + description: + - User specified description of service account. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a service account + google.cloud.gcp_iam_service_account: + name: sa-{{ resource_name.split("-")[-1] }}@graphite-playground.google.com.iam.gserviceaccount.com + display_name: My Ansible test key + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The name of the service account. 
+ returned: success + type: str +projectId: + description: + - Id of the project that owns the service account. + returned: success + type: str +uniqueId: + description: + - Unique and stable id of the service account. + returned: success + type: str +email: + description: + - Email address of the service account. + returned: success + type: str +displayName: + description: + - User specified description of service account. + returned: success + type: str +oauth2ClientId: + description: + - OAuth2 client id for the service account. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict(state=dict(default='present', choices=['present', 'absent'], type='str'), name=dict(type='str'), display_name=dict(type='str')) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/iam'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module)) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = 
GcpSession(module, 'iam') + return return_if_object(module, auth.post(link, resource_to_request(module))) + + +def update(module, link): + auth = GcpSession(module, 'iam') + return return_if_object(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link): + auth = GcpSession(module, 'iam') + return return_if_object(module, auth.delete(link)) + + +def resource_to_request(module): + request = {u'name': module.params.get('name'), u'displayName': module.params.get('display_name')} + request = encode_request(request, module) + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'iam') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://iam.googleapis.com/v1/projects/{project}/serviceAccounts/{name}".format(**module.params) + + +def collection(module): + return "https://iam.googleapis.com/v1/projects/{project}/serviceAccounts".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + result = decode_response(result, module) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + request = decode_response(request, module) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'name': response.get(u'name'), + u'projectId': response.get(u'projectId'), + u'uniqueId': response.get(u'uniqueId'), + u'email': response.get(u'email'), + u'displayName': response.get(u'displayName'), + u'oauth2ClientId': response.get(u'oauth2ClientId'), + } + + +def encode_request(resource_request, module): + """Structures the request as accountId + rest of request""" + account_id = resource_request['name'].split('@')[0] + del resource_request['name'] + return {'accountId': account_id, 'serviceAccount': resource_request} + + +def decode_response(response, module): + """Unstructures the request from accountId + rest of request""" + if 'name' not in response: + return response + response['name'] = response['name'].split('/')[-1] + return response + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account_info.py new file mode 100644 index 000000000..5a0d94e13 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account_info.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated 
by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_iam_service_account_info +description: +- Gather info for GCP ServiceAccount +short_description: Gather info for GCP ServiceAccount +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. 
+ - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a service account + gcp_iam_service_account_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of the service account. + returned: success + type: str + projectId: + description: + - Id of the project that owns the service account. + returned: success + type: str + uniqueId: + description: + - Unique and stable id of the service account. + returned: success + type: str + email: + description: + - Email address of the service account. + returned: success + type: str + displayName: + description: + - User specified description of service account. + returned: success + type: str + oauth2ClientId: + description: + - OAuth2 client id for the service account. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/iam'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://iam.googleapis.com/v1/projects/{project}/serviceAccounts".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'iam') + return auth.list(link, return_if_object, array_name='accounts') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account_key.py b/ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account_key.py new file mode 100644 index 000000000..633fd7492 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_iam_service_account_key.py @@ -0,0 +1,321 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_iam_service_account_key +description: +- A service account in the Identity and Access Management API. +short_description: Creates a GCP ServiceAccountKey +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + private_key_type: + description: + - Output format for the service account key. + - 'Some valid choices include: "TYPE_UNSPECIFIED", "TYPE_PKCS12_FILE", "TYPE_GOOGLE_CREDENTIALS_FILE"' + required: false + type: str + key_algorithm: + description: + - Specifies the algorithm for the key. + - 'Some valid choices include: "KEY_ALG_UNSPECIFIED", "KEY_ALG_RSA_1024", "KEY_ALG_RSA_2048"' + required: false + type: str + service_account: + description: + - The name of the serviceAccount. + - 'This field represents a link to a ServiceAccount resource in GCP. It can be + specified in two ways. 
First, you can place a dictionary with key ''name'' and + value of your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_iam_service_account task and then set this service_account field to + "{{ name-of-resource }}"' + required: false + type: dict + path: + description: + - The full name of the file that will hold the service account private key. The + management of this file will depend on the value of sync_file parameter. + - File path must be absolute. + required: false + type: path + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +''' + +EXAMPLES = ''' +- name: create a service account + google.cloud.gcp_iam_service_account: + name: test-ansible@graphite-playground.google.com.iam.gserviceaccount.com + display_name: My Ansible test key + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: serviceaccount + +- name: create a service account key + google.cloud.gcp_iam_service_account_key: + service_account: "{{ serviceaccount }}" + private_key_type: TYPE_GOOGLE_CREDENTIALS_FILE + path: "~/test_account.json" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The name of the key. + returned: success + type: str +privateKeyType: + description: + - Output format for the service account key. + returned: success + type: str +keyAlgorithm: + description: + - Specifies the algorithm for the key. + returned: success + type: str +privateKeyData: + description: + - Private key data. Base-64 encoded. + returned: success + type: str +publicKeyData: + description: + - Public key data. Base-64 encoded. + returned: success + type: str +validAfterTime: + description: + - Key can only be used after this time. + returned: success + type: str +validBeforeTime: + description: + - Key can only be used before this time. + returned: success + type: str +keyType: + description: + - Specifies the type of the key. Possible values include KEY_TYPE_UNSPECIFIED, USER_MANAGED + and SYSTEM_MANAGED . + returned: success + type: str +serviceAccount: + description: + - The name of the serviceAccount. + returned: success + type: dict +path: + description: + - The full name of the file that will hold the service account private key. The + management of this file will depend on the value of sync_file parameter. + - File path must be absolute. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +from ansible.module_utils._text import to_native +import json +import os +import mimetypes +import hashlib +import base64 + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + private_key_type=dict(type='str'), + key_algorithm=dict(type='str'), + service_account=dict(type='dict'), + path=dict(type='path'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/iam'] + + state = module.params['state'] + + # If file exists, we're doing a no-op or deleting the key. + changed = False + if os.path.isfile(module.params['path']): + fetch = fetch_resource(module) + # If file exists and we should delete the file, delete it. + if fetch and module.params['state'] == 'absent': + delete(module) + changed = True + + # Create the file if present state and no current file. + elif module.params['state'] == 'present': + create(module) + changed = True + + # Not returning any information about the key because that information should + # end up in logs. 
+ module.exit_json(**{'changed': changed, 'file_path': module.params['path']}) + + +def create(module): + auth = GcpSession(module, 'iam') + json_content = return_if_object(module, auth.post(self_link(module), resource_to_request(module))) + with open(module.params['path'], 'w') as f: + private_key_contents = to_native(base64.b64decode(json_content['privateKeyData'])) + f.write(private_key_contents) + + +def delete(module): + auth = GcpSession(module, 'iam') + return return_if_object(module, auth.delete(self_link_from_file(module))) + + +def resource_to_request(module): + request = {u'privateKeyType': module.params.get('private_key_type'), u'keyAlgorithm': module.params.get('key_algorithm')} + return_vals = {} + for k, v in request.items(): + if v: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module): + auth = GcpSession(module, 'iam') + return return_if_object(module, auth.get(self_link_from_file(module))) + + +def key_name_from_file(filename, module): + with open(filename, 'r') as f: + try: + json_data = json.loads(f.read()) + return "projects/{project_id}/serviceAccounts/{client_email}/keys/{private_key_id}".format(**json_data) + except ValueError as inst: + module.fail_json(msg="File is not a valid GCP JSON service account key") + + +def self_link_from_file(module): + key_name = key_name_from_file(module.params['path'], module) + return "https://iam.googleapis.com/v1/{key_name}".format(key_name=key_name) + + +def self_link(module): + results = {'project': module.params['project'], 'service_account': replace_resource_dict(module.params['service_account'], 'name')} + return "https://iam.googleapis.com/v1/projects/{project}/serviceAccounts/{service_account}/keys".format(**results) + + +def return_if_object(module, response): + # If not found, return nothing. + # return_if_object not used in any context where 404 means error. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_kms_crypto_key.py b/ansible_collections/google/cloud/plugins/modules/gcp_kms_crypto_key.py new file mode 100644 index 000000000..bdd6fbc25 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_kms_crypto_key.py @@ -0,0 +1,455 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_kms_crypto_key +description: +- A `CryptoKey` represents a logical key that can be used for cryptographic operations. 
+short_description: Creates a GCP CryptoKey +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The resource name for the CryptoKey. + required: true + type: str + labels: + description: + - Labels with user-defined metadata to apply to this resource. + required: false + type: dict + purpose: + description: + - Immutable purpose of CryptoKey. See U(https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) + for inputs. + - 'Some valid choices include: "ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"' + required: false + default: ENCRYPT_DECRYPT + type: str + rotation_period: + description: + - Every time this period passes, generate a new CryptoKeyVersion and set it as + the primary. + - The first rotation will take place after the specified period. The rotation + period has the format of a decimal number with up to 9 fractional digits, followed + by the letter `s` (seconds). It must be greater than a day (ie, 86400). + required: false + type: str + version_template: + description: + - A template describing settings for new crypto key versions. + required: false + type: dict + suboptions: + algorithm: + description: + - The algorithm to use when creating a version based on this template. + - See the [algorithm reference](U(https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm)) + for possible inputs. + required: true + type: str + protection_level: + description: + - The protection level to use when creating a version based on this template. + - 'Some valid choices include: "SOFTWARE", "HSM"' + required: false + type: str + key_ring: + description: + - The KeyRing that this key belongs to. 
+ - 'Format: `''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''`.' + required: true + type: str + skip_initial_version_creation: + description: + - If set to true, the request will create a CryptoKey without any CryptoKeyVersions. + You must use the `google_kms_key_ring_import_job` resource to import the CryptoKeyVersion. + required: false + type: bool + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys)' +- 'Creating a key: U(https://cloud.google.com/kms/docs/creating-keys#create_a_key)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. 
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a key ring + google.cloud.gcp_kms_key_ring: + name: key-key-ring + location: us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: keyring + +- name: create a crypto key + google.cloud.gcp_kms_crypto_key: + name: test_object + key_ring: projects/{{ gcp_project }}/locations/us-central1/keyRings/key-key-ring + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The resource name for the CryptoKey. + returned: success + type: str +createTime: + description: + - The time that this resource was created on the server. + - This is in RFC3339 text format. + returned: success + type: str +labels: + description: + - Labels with user-defined metadata to apply to this resource. + returned: success + type: dict +purpose: + description: + - Immutable purpose of CryptoKey. See U(https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) + for inputs. + returned: success + type: str +rotationPeriod: + description: + - Every time this period passes, generate a new CryptoKeyVersion and set it as the + primary. + - The first rotation will take place after the specified period. The rotation period + has the format of a decimal number with up to 9 fractional digits, followed by + the letter `s` (seconds). It must be greater than a day (ie, 86400). 
+ returned: success + type: str +versionTemplate: + description: + - A template describing settings for new crypto key versions. + returned: success + type: complex + contains: + algorithm: + description: + - The algorithm to use when creating a version based on this template. + - See the [algorithm reference](U(https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm)) + for possible inputs. + returned: success + type: str + protectionLevel: + description: + - The protection level to use when creating a version based on this template. + returned: success + type: str +nextRotationTime: + description: + - The time when KMS will create a new version of this Crypto Key. + returned: success + type: str +keyRing: + description: + - The KeyRing that this key belongs to. + - 'Format: `''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''`.' + returned: success + type: str +skipInitialVersionCreation: + description: + - If set to true, the request will create a CryptoKey without any CryptoKeyVersions. + You must use the `google_kms_key_ring_import_job` resource to import the CryptoKeyVersion. 
+ returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + labels=dict(type='dict'), + purpose=dict(default='ENCRYPT_DECRYPT', type='str'), + rotation_period=dict(type='str'), + version_template=dict(type='dict', options=dict(algorithm=dict(required=True, type='str'), protection_level=dict(type='str'))), + key_ring=dict(required=True, type='str'), + skip_initial_version_creation=dict(type='bool', default=False), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloudkms'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, create_link(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'kms') + return return_if_object(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, 
fetch): + auth = GcpSession(module, 'kms') + params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))} + request = resource_to_request(module) + return return_if_object(module, auth.patch(link, request, params=params)) + + +def updateMask(request, response): + update_mask = [] + if request.get('labels') != response.get('labels'): + update_mask.append('labels') + if request.get('rotationPeriod') != response.get('rotationPeriod'): + update_mask.append('rotationPeriod') + if request.get('versionTemplate') != response.get('versionTemplate'): + update_mask.append('versionTemplate') + return ','.join(update_mask) + + +def delete(module, link): + module.fail_json(msg="KeyRings cannot be deleted") + + +def resource_to_request(module): + request = { + u'labels': module.params.get('labels'), + u'purpose': module.params.get('purpose'), + u'rotationPeriod': module.params.get('rotation_period'), + u'versionTemplate': CryptoKeyVersiontemplate(module.params.get('version_template', {}), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'kms') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://cloudkms.googleapis.com/v1/{key_ring}/cryptoKeys/{name}".format(**module.params) + + +def collection(module): + return "https://cloudkms.googleapis.com/v1/{key_ring}/cryptoKeys".format(**module.params) + + +def create_link(module): + return "https://cloudkms.googleapis.com/v1/{key_ring}/cryptoKeys?cryptoKeyId={name}&skipInitialVersionCreation={skip_initial_version_creation}".format( + **module.params + ) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + result = decode_response(result, module) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + request = decode_response(request, module) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'name': module.params.get('name'), + u'createTime': response.get(u'createTime'), + u'labels': response.get(u'labels'), + u'purpose': module.params.get('purpose'), + u'rotationPeriod': response.get(u'rotationPeriod'), + u'versionTemplate': CryptoKeyVersiontemplate(response.get(u'versionTemplate', {}), module).from_response(), + u'nextRotationTime': response.get(u'nextRotationTime'), + } + + +def decode_response(response, module): + if 'name' in response: + response['name'] = response['name'].split('/')[-1] + return response + + +class CryptoKeyVersiontemplate(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'algorithm': self.request.get('algorithm'), u'protectionLevel': self.request.get('protection_level')}) + + def from_response(self): + return remove_nones_from_dict({u'algorithm': self.request.get(u'algorithm'), u'protectionLevel': self.module.params.get('protection_level')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_kms_crypto_key_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_kms_crypto_key_info.py new file mode 100644 index 000000000..24e98a964 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_kms_crypto_key_info.py @@ -0,0 +1,234 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_kms_crypto_key_info +description: +- Gather info for GCP CryptoKey +short_description: Gather info for GCP CryptoKey +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + key_ring: + description: + - The KeyRing that this key belongs to. + - 'Format: `''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''`.' + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. 
+ - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a crypto key + gcp_kms_crypto_key_info: + key_ring: projects/{{ gcp_project }}/locations/us-central1/keyRings/key-key-ring + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The resource name for the CryptoKey. + returned: success + type: str + createTime: + description: + - The time that this resource was created on the server. + - This is in RFC3339 text format. + returned: success + type: str + labels: + description: + - Labels with user-defined metadata to apply to this resource. + returned: success + type: dict + purpose: + description: + - Immutable purpose of CryptoKey. See U(https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) + for inputs. + returned: success + type: str + rotationPeriod: + description: + - Every time this period passes, generate a new CryptoKeyVersion and set it + as the primary. + - The first rotation will take place after the specified period. 
The rotation + period has the format of a decimal number with up to 9 fractional digits, + followed by the letter `s` (seconds). It must be greater than a day (ie, 86400). + returned: success + type: str + versionTemplate: + description: + - A template describing settings for new crypto key versions. + returned: success + type: complex + contains: + algorithm: + description: + - The algorithm to use when creating a version based on this template. + - See the [algorithm reference](U(https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm)) + for possible inputs. + returned: success + type: str + protectionLevel: + description: + - The protection level to use when creating a version based on this template. + returned: success + type: str + nextRotationTime: + description: + - The time when KMS will create a new version of this Crypto Key. + returned: success + type: str + keyRing: + description: + - The KeyRing that this key belongs to. + - 'Format: `''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''`.' + returned: success + type: str + skipInitialVersionCreation: + description: + - If set to true, the request will create a CryptoKey without any CryptoKeyVersions. + You must use the `google_kms_key_ring_import_job` resource to import the CryptoKeyVersion. 
def collection(module):
    """Build the cryptoKeys list URL under the configured key ring."""
    key_ring = module.params['key_ring']
    return 'https://cloudkms.googleapis.com/v1/{0}/cryptoKeys'.format(key_ring)
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_kms_key_ring.py b/ansible_collections/google/cloud/plugins/modules/gcp_kms_key_ring.py new file mode 100644 index 000000000..23bab157d --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_kms_key_ring.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_kms_key_ring +description: +- A `KeyRing` is a toplevel logical grouping of `CryptoKeys`. 
+short_description: Creates a GCP KeyRing +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The resource name for the KeyRing. + required: true + type: str + location: + description: + - The location for the KeyRing. + - A full list of valid locations can be found by running `gcloud kms locations + list`. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings)' +- 'Creating a key ring: U(https://cloud.google.com/kms/docs/creating-keys#create_a_key_ring)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. 
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a key ring + google.cloud.gcp_kms_key_ring: + name: test_object + location: us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The resource name for the KeyRing. + returned: success + type: str +createTime: + description: + - The time that this resource was created on the server. + - This is in RFC3339 text format. + returned: success + type: str +location: + description: + - The location for the KeyRing. + - A full list of valid locations can be found by running `gcloud kms locations list`. 
def resource_to_request(module):
    """Build the API request body from module params, dropping empty values.

    A value of False is kept (it is meaningful to the API), while None,
    '' and other falsy values are omitted from the request.
    """
    candidate = {u'name': module.params.get('name')}
    return {key: value for key, value in candidate.items() if value or value is False}
def return_if_object(module, response, allow_not_found=False):
    """Decode an HTTP response into a dict, or return None for 404/204.

    Fails the module on a non-JSON body or on an error object embedded in
    the decoded response.
    """
    # A missing resource is not an error when the caller allows it, and a
    # 204 carries no body to decode.
    not_found = allow_not_found and response.status_code == 404
    no_content = response.status_code == 204
    if not_found or no_content:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    result = decode_response(result, module)

    api_errors = navigate_hash(result, ['error', 'errors'])
    if api_errors:
        module.fail_json(msg=api_errors)

    return result
def response_to_hash(module, response):
    """Project the API response down to the fields Ansible manages."""
    hashed = {}
    for field in (u'name', u'createTime'):
        hashed[field] = response.get(field)
    return hashed
(@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + location: + description: + - The location for the KeyRing. + - A full list of valid locations can be found by running `gcloud kms locations + list`. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. 
def collection(module):
    """Build the keyRings list URL for the configured project and location."""
    template = 'https://cloudkms.googleapis.com/v1/projects/%(project)s/locations/%(location)s/keyRings'
    return template % module.params
nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_logging_metric.py b/ansible_collections/google/cloud/plugins/modules/gcp_logging_metric.py new file mode 100644 index 000000000..5d4740ce8 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_logging_metric.py @@ -0,0 +1,823 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_logging_metric +description: +- Logs-based metric can also be used to extract values from logs and create a a distribution + of the values. The distribution records the statistics of the extracted values along + with an optional histogram of the values as specified by the bucket options. +short_description: Creates a GCP Metric +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The client-assigned metric identifier. Examples - "error_count", "nginx/requests". + - Metric identifiers are limited to 100 characters and can include only the following + characters A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash + character (/) denotes a hierarchy of name pieces, and it cannot be the first + character of the name. + required: true + type: str + description: + description: + - A description of this metric, which is used in documentation. The maximum length + of the description is 8000 characters. + required: false + type: str + filter: + description: + - An advanced logs filter (U(https://cloud.google.com/logging/docs/view/advanced-filters)) + which is used to match log entries. 
+ required: true + type: str + metric_descriptor: + description: + - The metric descriptor associated with the logs-based metric. + required: true + type: dict + suboptions: + unit: + description: + - The unit in which the metric value is reported. It is only applicable if + the valueType is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The supported units + are a subset of [The Unified Code for Units of Measure](U(http://unitsofmeasure.org/ucum.html)) + standard . + required: false + default: '1' + type: str + value_type: + description: + - Whether the measurement is an integer, a floating-point number, etc. + - Some combinations of metricKind and valueType might not be supported. + - For counter metrics, set this to INT64. + - 'Some valid choices include: "BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION", + "MONEY"' + required: true + type: str + metric_kind: + description: + - Whether the metric records instantaneous values, changes to a value, etc. + - Some combinations of metricKind and valueType might not be supported. + - For counter metrics, set this to DELTA. + - 'Some valid choices include: "DELTA", "GAUGE", "CUMULATIVE"' + required: true + type: str + labels: + description: + - The set of labels that can be used to describe a specific instance of this + metric type. For example, the appengine.googleapis.com/http/server/response_latencies + metric type has a label for the HTTP response code, response_code, so you + can look at latencies for successful responses or just for responses that + failed. + elements: dict + required: false + type: list + suboptions: + key: + description: + - The label key. + required: true + type: str + description: + description: + - A human-readable description for the label. + required: false + type: str + value_type: + description: + - The type of data that can be assigned to the label. 
+ - 'Some valid choices include: "BOOL", "INT64", "STRING"' + required: false + default: STRING + type: str + display_name: + description: + - A concise name for the metric, which can be displayed in user interfaces. + Use sentence case without an ending period, for example "Request count". + This field is optional but it is recommended to be set for any metrics associated + with user-visible concepts, such as Quota. + required: false + type: str + label_extractors: + description: + - A map from a label key string to an extractor expression which is used to extract + data from a log entry field and assign as the label value. Each label key specified + in the LabelDescriptor must have an associated extractor expression in this + map. The syntax of the extractor expression is the same as for the valueExtractor + field. + required: false + type: dict + value_extractor: + description: + - A valueExtractor is required when using a distribution logs-based metric to + extract the values to record from a log entry. Two functions are supported for + value extraction - EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument + are 1. field - The name of the log entry field from which the value is to be + extracted. 2. regex - A regular expression using the Google RE2 syntax (U(https://github.com/google/re2/wiki/Syntax)) + with a single capture group to extract data from the specified log entry field. + The value of the field is converted to a string before applying the regex. It + is an error to specify a regex that does not include exactly one capture group. + required: false + type: str + bucket_options: + description: + - The bucketOptions are required when the logs-based metric is using a DISTRIBUTION + value type and it describes the bucket boundaries used to create a histogram + of the extracted values. 
+ required: false + type: dict + suboptions: + linear_buckets: + description: + - Specifies a linear sequence of buckets that all have the same width (except + overflow and underflow). + - Each bucket represents a constant absolute uncertainty on the specific value + in the bucket. + required: false + type: dict + suboptions: + num_finite_buckets: + description: + - Must be greater than 0. + required: false + type: int + width: + description: + - Must be greater than 0. + required: false + type: int + offset: + description: + - Lower bound of the first bucket. + required: false + type: str + exponential_buckets: + description: + - Specifies an exponential sequence of buckets that have a width that is proportional + to the value of the lower bound. Each bucket represents a constant relative + uncertainty on a specific value in the bucket. + required: false + type: dict + suboptions: + num_finite_buckets: + description: + - Must be greater than 0. + required: false + type: int + growth_factor: + description: + - Must be greater than 1. + required: false + type: str + scale: + description: + - Must be greater than 0. + required: false + type: str + explicit_buckets: + description: + - Specifies a set of buckets with arbitrary widths. + required: false + type: dict + suboptions: + bounds: + description: + - The values must be monotonically increasing. + elements: str + required: true + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create)' +- 'Official Documentation: U(https://cloud.google.com/logging/docs/apis)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a metric + google.cloud.gcp_logging_metric: + name: test_object + filter: resource.type=gae_app AND severity>=ERROR + metric_descriptor: + metric_kind: DELTA + value_type: DISTRIBUTION + unit: '1' + labels: + - key: mass + value_type: STRING + description: amount of matter + value_extractor: EXTRACT(jsonPayload.request) + label_extractors: + mass: EXTRACT(jsonPayload.request) + bucket_options: + linear_buckets: + num_finite_buckets: 3 + width: 1 + offset: 1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The client-assigned metric identifier. Examples - "error_count", "nginx/requests". + - Metric identifiers are limited to 100 characters and can include only the following + characters A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash + character (/) denotes a hierarchy of name pieces, and it cannot be the first character + of the name. + returned: success + type: str +description: + description: + - A description of this metric, which is used in documentation. The maximum length + of the description is 8000 characters. + returned: success + type: str +filter: + description: + - An advanced logs filter (U(https://cloud.google.com/logging/docs/view/advanced-filters)) + which is used to match log entries. + returned: success + type: str +metricDescriptor: + description: + - The metric descriptor associated with the logs-based metric. + returned: success + type: complex + contains: + unit: + description: + - The unit in which the metric value is reported. It is only applicable if the + valueType is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The supported units are + a subset of [The Unified Code for Units of Measure](U(http://unitsofmeasure.org/ucum.html)) + standard . + returned: success + type: str + valueType: + description: + - Whether the measurement is an integer, a floating-point number, etc. 
+ - Some combinations of metricKind and valueType might not be supported. + - For counter metrics, set this to INT64. + returned: success + type: str + metricKind: + description: + - Whether the metric records instantaneous values, changes to a value, etc. + - Some combinations of metricKind and valueType might not be supported. + - For counter metrics, set this to DELTA. + returned: success + type: str + labels: + description: + - The set of labels that can be used to describe a specific instance of this + metric type. For example, the appengine.googleapis.com/http/server/response_latencies + metric type has a label for the HTTP response code, response_code, so you + can look at latencies for successful responses or just for responses that + failed. + returned: success + type: complex + contains: + key: + description: + - The label key. + returned: success + type: str + description: + description: + - A human-readable description for the label. + returned: success + type: str + valueType: + description: + - The type of data that can be assigned to the label. + returned: success + type: str + displayName: + description: + - A concise name for the metric, which can be displayed in user interfaces. + Use sentence case without an ending period, for example "Request count". This + field is optional but it is recommended to be set for any metrics associated + with user-visible concepts, such as Quota. + returned: success + type: str + type: + description: + - The metric type, including its DNS name prefix. The type is not URL-encoded. + - All user-defined metric types have the DNS name `custom.googleapis.com` or + `external.googleapis.com`. + returned: success + type: str +labelExtractors: + description: + - A map from a label key string to an extractor expression which is used to extract + data from a log entry field and assign as the label value. Each label key specified + in the LabelDescriptor must have an associated extractor expression in this map. 
+ The syntax of the extractor expression is the same as for the valueExtractor field. + returned: success + type: dict +valueExtractor: + description: + - A valueExtractor is required when using a distribution logs-based metric to extract + the values to record from a log entry. Two functions are supported for value extraction + - EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are 1. field - + The name of the log entry field from which the value is to be extracted. 2. regex + - A regular expression using the Google RE2 syntax (U(https://github.com/google/re2/wiki/Syntax)) + with a single capture group to extract data from the specified log entry field. + The value of the field is converted to a string before applying the regex. It + is an error to specify a regex that does not include exactly one capture group. + returned: success + type: str +bucketOptions: + description: + - The bucketOptions are required when the logs-based metric is using a DISTRIBUTION + value type and it describes the bucket boundaries used to create a histogram of + the extracted values. + returned: success + type: complex + contains: + linearBuckets: + description: + - Specifies a linear sequence of buckets that all have the same width (except + overflow and underflow). + - Each bucket represents a constant absolute uncertainty on the specific value + in the bucket. + returned: success + type: complex + contains: + numFiniteBuckets: + description: + - Must be greater than 0. + returned: success + type: int + width: + description: + - Must be greater than 0. + returned: success + type: int + offset: + description: + - Lower bound of the first bucket. + returned: success + type: str + exponentialBuckets: + description: + - Specifies an exponential sequence of buckets that have a width that is proportional + to the value of the lower bound. Each bucket represents a constant relative + uncertainty on a specific value in the bucket. 
+ returned: success + type: complex + contains: + numFiniteBuckets: + description: + - Must be greater than 0. + returned: success + type: int + growthFactor: + description: + - Must be greater than 1. + returned: success + type: str + scale: + description: + - Must be greater than 0. + returned: success + type: str + explicitBuckets: + description: + - Specifies a set of buckets with arbitrary widths. + returned: success + type: complex + contains: + bounds: + description: + - The values must be monotonically increasing. + returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + filter=dict(required=True, type='str'), + metric_descriptor=dict( + required=True, + type='dict', + options=dict( + unit=dict(default='1', type='str'), + value_type=dict(required=True, type='str'), + metric_kind=dict(required=True, type='str'), + labels=dict( + type='list', + elements='dict', + options=dict(key=dict(required=True, type='str'), description=dict(type='str'), value_type=dict(default='STRING', type='str')), + ), + display_name=dict(type='str'), + ), + ), + label_extractors=dict(type='dict'), + value_extractor=dict(type='str'), + bucket_options=dict( + type='dict', + options=dict( + linear_buckets=dict(type='dict', 
options=dict(num_finite_buckets=dict(type='int'), width=dict(type='int'), offset=dict(type='str'))), + exponential_buckets=dict( + type='dict', options=dict(num_finite_buckets=dict(type='int'), growth_factor=dict(type='str'), scale=dict(type='str')) + ), + explicit_buckets=dict(type='dict', options=dict(bounds=dict(required=True, type='list', elements='str'))), + ), + ), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module)) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'logging') + return return_if_object(module, auth.post(link, resource_to_request(module))) + + +def update(module, link): + auth = GcpSession(module, 'logging') + return return_if_object(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link): + auth = GcpSession(module, 'logging') + return return_if_object(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'filter': module.params.get('filter'), + u'metricDescriptor': MetricMetricdescriptor(module.params.get('metric_descriptor', {}), module).to_request(), + u'labelExtractors': module.params.get('label_extractors'), + u'valueExtractor': module.params.get('value_extractor'), + u'bucketOptions': MetricBucketoptions(module.params.get('bucket_options', {}), module).to_request(), + } + return_vals 
= {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'logging') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://logging.googleapis.com/v2/projects/{project}/metrics/{name}".format(**module.params) + + +def collection(module): + return "https://logging.googleapis.com/v2/projects/{project}/metrics".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'name': response.get(u'name'), + u'description': response.get(u'description'), + u'filter': response.get(u'filter'), + u'metricDescriptor': MetricMetricdescriptor(response.get(u'metricDescriptor', {}), module).from_response(), + u'labelExtractors': response.get(u'labelExtractors'), + u'valueExtractor': response.get(u'valueExtractor'), + u'bucketOptions': MetricBucketoptions(response.get(u'bucketOptions', {}), module).from_response(), + } + + +class MetricMetricdescriptor(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'unit': self.request.get('unit'), + u'valueType': self.request.get('value_type'), + u'metricKind': self.request.get('metric_kind'), + u'labels': MetricLabelsArray(self.request.get('labels', []), self.module).to_request(), + u'displayName': self.request.get('display_name'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'unit': self.request.get(u'unit'), + u'valueType': self.request.get(u'valueType'), + u'metricKind': self.request.get(u'metricKind'), + u'labels': MetricLabelsArray(self.request.get(u'labels', []), self.module).from_response(), + u'displayName': self.request.get(u'displayName'), + } + ) + + +class MetricLabelsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({u'key': item.get('key'), u'description': item.get('description'), u'valueType': item.get('value_type')}) + + def 
_response_from_item(self, item): + return remove_nones_from_dict( + {u'key': self.module.params.get('key'), u'description': item.get(u'description'), u'valueType': self.module.params.get('value_type')} + ) + + +class MetricBucketoptions(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'linearBuckets': MetricLinearbuckets(self.request.get('linear_buckets', {}), self.module).to_request(), + u'exponentialBuckets': MetricExponentialbuckets(self.request.get('exponential_buckets', {}), self.module).to_request(), + u'explicitBuckets': MetricExplicitbuckets(self.request.get('explicit_buckets', {}), self.module).to_request(), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'linearBuckets': MetricLinearbuckets(self.request.get(u'linearBuckets', {}), self.module).from_response(), + u'exponentialBuckets': MetricExponentialbuckets(self.request.get(u'exponentialBuckets', {}), self.module).from_response(), + u'explicitBuckets': MetricExplicitbuckets(self.request.get(u'explicitBuckets', {}), self.module).from_response(), + } + ) + + +class MetricLinearbuckets(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + {u'numFiniteBuckets': self.request.get('num_finite_buckets'), u'width': self.request.get('width'), u'offset': self.request.get('offset')} + ) + + def from_response(self): + return remove_nones_from_dict( + {u'numFiniteBuckets': self.request.get(u'numFiniteBuckets'), u'width': self.request.get(u'width'), u'offset': self.request.get(u'offset')} + ) + + +class MetricExponentialbuckets(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + 
return remove_nones_from_dict( + { + u'numFiniteBuckets': self.request.get('num_finite_buckets'), + u'growthFactor': self.request.get('growth_factor'), + u'scale': self.request.get('scale'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'numFiniteBuckets': self.request.get(u'numFiniteBuckets'), + u'growthFactor': self.request.get(u'growthFactor'), + u'scale': self.request.get(u'scale'), + } + ) + + +class MetricExplicitbuckets(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'bounds': self.request.get('bounds')}) + + def from_response(self): + return remove_nones_from_dict({u'bounds': self.request.get(u'bounds')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_logging_metric_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_logging_metric_info.py new file mode 100644 index 000000000..482a84047 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_logging_metric_info.py @@ -0,0 +1,338 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_logging_metric_info +description: +- Gather info for GCP Metric +short_description: Gather info for GCP Metric +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a metric + gcp_logging_metric_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The client-assigned metric identifier. Examples - "error_count", "nginx/requests". + - Metric identifiers are limited to 100 characters and can include only the + following characters A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. + The forward-slash character (/) denotes a hierarchy of name pieces, and it + cannot be the first character of the name. + returned: success + type: str + description: + description: + - A description of this metric, which is used in documentation. The maximum + length of the description is 8000 characters. + returned: success + type: str + filter: + description: + - An advanced logs filter (U(https://cloud.google.com/logging/docs/view/advanced-filters)) + which is used to match log entries. + returned: success + type: str + metricDescriptor: + description: + - The metric descriptor associated with the logs-based metric. 
+ returned: success + type: complex + contains: + unit: + description: + - The unit in which the metric value is reported. It is only applicable + if the valueType is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The supported + units are a subset of [The Unified Code for Units of Measure](U(http://unitsofmeasure.org/ucum.html)) + standard . + returned: success + type: str + valueType: + description: + - Whether the measurement is an integer, a floating-point number, etc. + - Some combinations of metricKind and valueType might not be supported. + - For counter metrics, set this to INT64. + returned: success + type: str + metricKind: + description: + - Whether the metric records instantaneous values, changes to a value, etc. + - Some combinations of metricKind and valueType might not be supported. + - For counter metrics, set this to DELTA. + returned: success + type: str + labels: + description: + - The set of labels that can be used to describe a specific instance of + this metric type. For example, the appengine.googleapis.com/http/server/response_latencies + metric type has a label for the HTTP response code, response_code, so + you can look at latencies for successful responses or just for responses + that failed. + returned: success + type: complex + contains: + key: + description: + - The label key. + returned: success + type: str + description: + description: + - A human-readable description for the label. + returned: success + type: str + valueType: + description: + - The type of data that can be assigned to the label. + returned: success + type: str + displayName: + description: + - A concise name for the metric, which can be displayed in user interfaces. + Use sentence case without an ending period, for example "Request count". + This field is optional but it is recommended to be set for any metrics + associated with user-visible concepts, such as Quota. + returned: success + type: str + type: + description: + - The metric type, including its DNS name prefix. 
The type is not URL-encoded. + - All user-defined metric types have the DNS name `custom.googleapis.com` + or `external.googleapis.com`. + returned: success + type: str + labelExtractors: + description: + - A map from a label key string to an extractor expression which is used to + extract data from a log entry field and assign as the label value. Each label + key specified in the LabelDescriptor must have an associated extractor expression + in this map. The syntax of the extractor expression is the same as for the + valueExtractor field. + returned: success + type: dict + valueExtractor: + description: + - A valueExtractor is required when using a distribution logs-based metric to + extract the values to record from a log entry. Two functions are supported + for value extraction - EXTRACT(field) or REGEXP_EXTRACT(field, regex). The + argument are 1. field - The name of the log entry field from which the value + is to be extracted. 2. regex - A regular expression using the Google RE2 syntax + (U(https://github.com/google/re2/wiki/Syntax)) with a single capture group + to extract data from the specified log entry field. The value of the field + is converted to a string before applying the regex. It is an error to specify + a regex that does not include exactly one capture group. + returned: success + type: str + bucketOptions: + description: + - The bucketOptions are required when the logs-based metric is using a DISTRIBUTION + value type and it describes the bucket boundaries used to create a histogram + of the extracted values. + returned: success + type: complex + contains: + linearBuckets: + description: + - Specifies a linear sequence of buckets that all have the same width (except + overflow and underflow). + - Each bucket represents a constant absolute uncertainty on the specific + value in the bucket. + returned: success + type: complex + contains: + numFiniteBuckets: + description: + - Must be greater than 0. 
+ returned: success + type: int + width: + description: + - Must be greater than 0. + returned: success + type: int + offset: + description: + - Lower bound of the first bucket. + returned: success + type: str + exponentialBuckets: + description: + - Specifies an exponential sequence of buckets that have a width that is + proportional to the value of the lower bound. Each bucket represents a + constant relative uncertainty on a specific value in the bucket. + returned: success + type: complex + contains: + numFiniteBuckets: + description: + - Must be greater than 0. + returned: success + type: int + growthFactor: + description: + - Must be greater than 1. + returned: success + type: str + scale: + description: + - Must be greater than 0. + returned: success + type: str + explicitBuckets: + description: + - Specifies a set of buckets with arbitrary widths. + returned: success + type: complex + contains: + bounds: + description: + - The values must be monotonically increasing. + returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://logging.googleapis.com/v2/projects/{project}/metrics".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'logging') 
+ return auth.list(link, return_if_object, array_name='metrics') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_model.py b/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_model.py new file mode 100644 index 000000000..d143c98b8 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_model.py @@ -0,0 +1,438 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_mlengine_model +description: +- Represents a machine learning solution. +- A model can have multiple versions, each of which is a deployed, trained model ready + to receive prediction requests. The model itself is just a container. +short_description: Creates a GCP Model +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The name specified for the model. + required: true + type: str + description: + description: + - The description specified for the model when it was created. + required: false + type: str + default_version: + description: + - The default version of the model. This version will be used to handle prediction + requests that do not specify a version. + required: false + type: dict + suboptions: + name: + description: + - The name specified for the version when it was created. + required: true + type: str + regions: + description: + - The list of regions where the model is going to be deployed. + - Currently only one region per model is supported . 
+ elements: str + required: false + type: list + online_prediction_logging: + description: + - If true, online prediction access logs are sent to StackDriver Logging. + required: false + type: bool + online_prediction_console_logging: + description: + - If true, online prediction nodes send stderr and stdout streams to Stackdriver + Logging. + required: false + type: bool + labels: + description: + - One or more labels that you can add, to organize your models. + required: false + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/ai-platform/prediction/docs/reference/rest/v1/projects.models)' +- 'Official Documentation: U(https://cloud.google.com/ai-platform/prediction/docs/deploying-models)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. 
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a model + google.cloud.gcp_mlengine_model: + name: "{{ resource_name | replace('-', '_') }}" + description: My model + regions: + - us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The name specified for the model. + returned: success + type: str +description: + description: + - The description specified for the model when it was created. + returned: success + type: str +defaultVersion: + description: + - The default version of the model. This version will be used to handle prediction + requests that do not specify a version. + returned: success + type: complex + contains: + name: + description: + - The name specified for the version when it was created. + returned: success + type: str +regions: + description: + - The list of regions where the model is going to be deployed. + - Currently only one region per model is supported . + returned: success + type: list +onlinePredictionLogging: + description: + - If true, online prediction access logs are sent to StackDriver Logging. + returned: success + type: bool +onlinePredictionConsoleLogging: + description: + - If true, online prediction nodes send stderr and stdout streams to Stackdriver + Logging. 
+ returned: success + type: bool +labels: + description: + - One or more labels that you can add, to organize your models. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + default_version=dict(type='dict', options=dict(name=dict(required=True, type='str'))), + regions=dict(type='list', elements='str'), + online_prediction_logging=dict(type='bool'), + online_prediction_console_logging=dict(type='bool'), + labels=dict(type='dict'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module)) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'mlengine') + return return_if_object(module, 
auth.post(link, resource_to_request(module))) + + +def update(module, link): + delete(module, self_link(module)) + create(module, collection(module)) + + +def delete(module, link): + auth = GcpSession(module, 'mlengine') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'defaultVersion': ModelDefaultversion(module.params.get('default_version', {}), module).to_request(), + u'regions': module.params.get('regions'), + u'onlinePredictionLogging': module.params.get('online_prediction_logging'), + u'onlinePredictionConsoleLogging': module.params.get('online_prediction_console_logging'), + u'labels': module.params.get('labels'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'mlengine') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://ml.googleapis.com/v1/projects/{project}/models/{name}".format(**module.params) + + +def collection(module): + return "https://ml.googleapis.com/v1/projects/{project}/models".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + result = decode_response(result, module) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + request = decode_response(request, module) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response):
+    """Reduce an API Model resource to the fields this module manages.
+
+    Used only for drift detection: is_different() intersects this dict
+    with resource_to_request() output before comparing, so extra or
+    output-only keys are harmless.
+    """
+    return {
+        u'name': response.get(u'name'),
+        u'description': response.get(u'description'),
+        # defaultVersion is a nested object; normalize through the same
+        # serializer used for requests so both sides compare equal.
+        u'defaultVersion': ModelDefaultversion(response.get(u'defaultVersion', {}), module).from_response(),
+        u'regions': response.get(u'regions'),
+        u'onlinePredictionLogging': response.get(u'onlinePredictionLogging'),
+        u'onlinePredictionConsoleLogging': response.get(u'onlinePredictionConsoleLogging'),
+        u'labels': response.get(u'labels'),
+    }
+
+
+def async_op_url(module, extra_data=None):
+    """Build the polling URL for a long-running operation.
+
+    extra_data supplies 'op_id' (the operation's full resource name);
+    module.params is merged on top, so a param with the same key would
+    win — in practice only 'op_id' is interpolated into the URL.
+    """
+    if extra_data is None:
+        extra_data = {}
+    url = "https://ml.googleapis.com/v1/{op_id}"
+    combined = extra_data.copy()
+    combined.update(module.params)
+    return url.format(**combined)
+
+
+def wait_for_operation(module, response):
+    """Block until the operation in `response` finishes; return its result.
+
+    Returns {} if the HTTP response carried no operation object (e.g. 404
+    with allow_not_found); fails the module if the finished operation
+    reports an 'error'. On success returns the operation's 'response'
+    payload (may be None if the API omitted it).
+    """
+    op_result = return_if_object(module, response)
+    if op_result is None:
+        return {}
+    status = navigate_hash(op_result, ['done'])
+    wait_done = wait_for_completion(status, op_result, module)
+    raise_if_errors(wait_done, ['error'], module)
+    return navigate_hash(wait_done, ['response'])
+
+
+def wait_for_completion(status, op_result, module):
+    """Poll the operation every second until done=True.
+
+    NOTE(review): there is no timeout or backoff here — the loop polls
+    indefinitely and relies on the API eventually marking the operation
+    done or setting 'error' (which fails the module via raise_if_errors).
+    """
+    op_id = navigate_hash(op_result, ['name'])
+    op_uri = async_op_url(module, {'op_id': op_id})
+    while not status:
+        raise_if_errors(op_result, ['error'], module)
+        time.sleep(1.0)
+        # allow_not_found=False: a vanished operation is a hard failure.
+        op_result = fetch_resource(module, op_uri, False)
+        status = navigate_hash(op_result, ['done'])
+    return op_result
+
+
+def raise_if_errors(response, err_path, module):
+    """Fail the module if `response` has a non-None value at err_path."""
+    errors = navigate_hash(response, err_path)
+    if errors is not None:
+        module.fail_json(msg=errors)
+
+
+# Short names are given (and expected) by the API
+# but are returned as full names.
+def decode_response(response, module):
+    """Normalize resource names returned by the API to short form.
+
+    The API returns 'name' as a full path (e.g. projects/p/models/m);
+    strip it to the last segment so it matches the short name the module
+    sends. Objects carrying 'metadata' (presumably operation resources —
+    confirm) are left untouched so their full operation name survives.
+    """
+    if 'name' in response and 'metadata' not in response:
+        response['name'] = response['name'].split('/')[-1]
+    return response
+
+
+class ModelDefaultversion(object):
+    # Serializer for the nested default_version / defaultVersion object:
+    # reduces it to {'name': ...} in both directions, dropping None values.
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            # request may be None when the param/response omits the field.
+            self.request = {}
+
+    def to_request(self):
+        # Ansible param uses snake_case key 'name' (same spelling here).
+        return remove_nones_from_dict({u'name': self.request.get('name')})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'name': self.request.get(u'name')})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_model_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_model_info.py
new file mode 100644
index 000000000..cdd233058
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_model_info.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+#     ***     AUTO GENERATED CODE    ***    Type: MMv1     ***
+#
+# ----------------------------------------------------------------------------
+#
+#     This file is automatically generated by Magic Modules and manual
+#     changes will be clobbered when the file is regenerated.
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_mlengine_model_info +description: +- Gather info for GCP Model +short_description: Gather info for GCP Model +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a model + gcp_mlengine_model_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name specified for the model. + returned: success + type: str + description: + description: + - The description specified for the model when it was created. + returned: success + type: str + defaultVersion: + description: + - The default version of the model. This version will be used to handle prediction + requests that do not specify a version. + returned: success + type: complex + contains: + name: + description: + - The name specified for the version when it was created. + returned: success + type: str + regions: + description: + - The list of regions where the model is going to be deployed. + - Currently only one region per model is supported . + returned: success + type: list + onlinePredictionLogging: + description: + - If true, online prediction access logs are sent to StackDriver Logging. 
+ returned: success + type: bool + onlinePredictionConsoleLogging: + description: + - If true, online prediction nodes send stderr and stdout streams to Stackdriver + Logging. + returned: success + type: bool + labels: + description: + - One or more labels that you can add, to organize your models. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://ml.googleapis.com/v1/projects/{project}/models".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'mlengine') + return auth.list(link, return_if_object, array_name='models') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_version.py b/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_version.py new file mode 100644 index 000000000..5bb0620cc --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_version.py @@ -0,0 +1,634 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_mlengine_version +description: +- Each version is a trained model deployed in the cloud, ready to handle prediction + requests. A model can have multiple versions . +short_description: Creates a GCP Version +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The name specified for the version when it was created. + - The version name must be unique within the model it is created in. + required: true + type: str + description: + description: + - The description specified for the version when it was created. + required: false + type: str + deployment_uri: + description: + - The Cloud Storage location of the trained model used to create the version. + required: true + type: str + runtime_version: + description: + - The AI Platform runtime version to use for this deployment. + required: false + type: str + machine_type: + description: + - The type of machine on which to serve the model. Currently only applies to online + prediction service. 
+ - 'Some valid choices include: "mls1-c1-m2", "mls1-c4-m2"' + required: false + type: str + labels: + description: + - One or more labels that you can add, to organize your model versions. + required: false + type: dict + framework: + description: + - The machine learning framework AI Platform uses to train this version of the + model. + - 'Some valid choices include: "FRAMEWORK_UNSPECIFIED", "TENSORFLOW", "SCIKIT_LEARN", + "XGBOOST"' + required: false + type: str + python_version: + description: + - The version of Python used in prediction. If not set, the default version is + '2.7'. Python '3.5' is available when runtimeVersion is set to '1.4' and above. + Python '2.7' works with all supported runtime versions. + - 'Some valid choices include: "2.7", "3.5"' + required: false + type: str + service_account: + description: + - Specifies the service account for resource access control. + required: false + type: str + auto_scaling: + description: + - Automatically scale the number of nodes used to serve the model in response + to increases and decreases in traffic. Care should be taken to ramp up traffic + according to the model's ability to scale or you will start seeing increases + in latency and 429 response codes. + required: false + type: dict + suboptions: + min_nodes: + description: + - The minimum number of nodes to allocate for this mode. + required: false + type: int + manual_scaling: + description: + - Manually select the number of nodes to use for serving the model. You should + generally use autoScaling with an appropriate minNodes instead, but this option + is available if you want more predictable billing. Beware that latency and error + rates will increase if the traffic exceeds that capability of the system to + serve it based on the selected number of nodes. + required: false + type: dict + suboptions: + nodes: + description: + - The number of nodes to allocate for this model. 
These nodes are always up, + starting from the time the model is deployed. + required: false + type: int + prediction_class: + description: + - The fully qualified name (module_name.class_name) of a class that implements + the Predictor interface described in this reference field. The module containing + this class should be included in a package provided to the packageUris field. + required: false + type: str + model: + description: + - The model that this version belongs to. + - 'This field represents a link to a Model resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_mlengine_model task and then set this model field to "{{ name-of-resource + }}"' + required: true + type: dict + is_default: + description: + - If true, this version will be used to handle prediction requests that do not + specify a version. + required: false + type: bool + aliases: + - default + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. 
+ - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a model + google.cloud.gcp_mlengine_model: + name: model_version + description: My model + regions: + - us-central1 + online_prediction_logging: 'true' + online_prediction_console_logging: 'true' + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: model + +- name: create a version + google.cloud.gcp_mlengine_version: + name: "{{ resource_name | replace('-', '_') }}" + model: "{{ model }}" + runtime_version: 1.13 + python_version: 3.5 + is_default: 'true' + deployment_uri: gs://ansible-cloudml-bucket/ + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The name specified for the version when it was created. + - The version name must be unique within the model it is created in. + returned: success + type: str +description: + description: + - The description specified for the version when it was created. + returned: success + type: str +deploymentUri: + description: + - The Cloud Storage location of the trained model used to create the version. + returned: success + type: str +createTime: + description: + - The time the version was created. + returned: success + type: str +lastUseTime: + description: + - The time the version was last used for prediction. + returned: success + type: str +runtimeVersion: + description: + - The AI Platform runtime version to use for this deployment. + returned: success + type: str +machineType: + description: + - The type of machine on which to serve the model. Currently only applies to online + prediction service. + returned: success + type: str +state: + description: + - The state of a version. 
+ returned: success + type: str +errorMessage: + description: + - The details of a failure or cancellation. + returned: success + type: str +packageUris: + description: + - Cloud Storage paths (gs://…) of packages for custom prediction routines or scikit-learn + pipelines with custom code. + returned: success + type: list +labels: + description: + - One or more labels that you can add, to organize your model versions. + returned: success + type: dict +framework: + description: + - The machine learning framework AI Platform uses to train this version of the model. + returned: success + type: str +pythonVersion: + description: + - The version of Python used in prediction. If not set, the default version is '2.7'. + Python '3.5' is available when runtimeVersion is set to '1.4' and above. Python + '2.7' works with all supported runtime versions. + returned: success + type: str +serviceAccount: + description: + - Specifies the service account for resource access control. + returned: success + type: str +autoScaling: + description: + - Automatically scale the number of nodes used to serve the model in response to + increases and decreases in traffic. Care should be taken to ramp up traffic according + to the model's ability to scale or you will start seeing increases in latency + and 429 response codes. + returned: success + type: complex + contains: + minNodes: + description: + - The minimum number of nodes to allocate for this mode. + returned: success + type: int +manualScaling: + description: + - Manually select the number of nodes to use for serving the model. You should generally + use autoScaling with an appropriate minNodes instead, but this option is available + if you want more predictable billing. Beware that latency and error rates will + increase if the traffic exceeds that capability of the system to serve it based + on the selected number of nodes. 
+ returned: success + type: complex + contains: + nodes: + description: + - The number of nodes to allocate for this model. These nodes are always up, + starting from the time the model is deployed. + returned: success + type: int +predictionClass: + description: + - The fully qualified name (module_name.class_name) of a class that implements the + Predictor interface described in this reference field. The module containing this + class should be included in a package provided to the packageUris field. + returned: success + type: str +model: + description: + - The model that this version belongs to. + returned: success + type: dict +isDefault: + description: + - If true, this version will be used to handle prediction requests that do not specify + a version. + returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + deployment_uri=dict(required=True, type='str'), + runtime_version=dict(type='str'), + machine_type=dict(type='str'), + labels=dict(type='dict'), + framework=dict(type='str'), + python_version=dict(type='str'), + service_account=dict(type='str'), + auto_scaling=dict(type='dict', options=dict(min_nodes=dict(type='int'))), + manual_scaling=dict(type='dict', 
options=dict(nodes=dict(type='int'))), + prediction_class=dict(type='str'), + model=dict(required=True, type='dict'), + is_default=dict(type='bool', aliases=['default']), + ), + mutually_exclusive=[['auto_scaling', 'manual_scaling']], + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module)) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + if module.params.get('is_default') is True: + set_default(module) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'mlengine') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link): + if module.params.get('is_default') is True: + set_default(module) + + +def delete(module, link): + auth = GcpSession(module, 'mlengine') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'deploymentUri': module.params.get('deployment_uri'), + u'runtimeVersion': module.params.get('runtime_version'), + u'machineType': module.params.get('machine_type'), + u'labels': module.params.get('labels'), + u'framework': module.params.get('framework'), + u'pythonVersion': module.params.get('python_version'), + u'serviceAccount': module.params.get('service_account'), + u'autoScaling': VersionAutoscaling(module.params.get('auto_scaling', {}), module).to_request(), + u'manualScaling': 
VersionManualscaling(module.params.get('manual_scaling', {}), module).to_request(), + u'predictionClass': module.params.get('prediction_class'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'mlengine') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + res = {'project': module.params['project'], 'model': replace_resource_dict(module.params['model'], 'name'), 'name': module.params['name']} + return "https://ml.googleapis.com/v1/projects/{project}/models/{model}/versions/{name}".format(**res) + + +def collection(module): + res = {'project': module.params['project'], 'model': replace_resource_dict(module.params['model'], 'name')} + return "https://ml.googleapis.com/v1/projects/{project}/models/{model}/versions".format(**res) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + result = decode_response(result, module) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + request = decode_response(request, module) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'name': response.get(u'name'), + u'description': response.get(u'description'), + u'deploymentUri': response.get(u'deploymentUri'), + u'createTime': response.get(u'createTime'), + u'lastUseTime': response.get(u'lastUseTime'), + u'runtimeVersion': response.get(u'runtimeVersion'), + u'machineType': response.get(u'machineType'), + u'state': response.get(u'state'), + u'errorMessage': response.get(u'errorMessage'), + u'packageUris': response.get(u'packageUris'), + u'labels': response.get(u'labels'), + u'framework': response.get(u'framework'), + u'pythonVersion': response.get(u'pythonVersion'), + u'serviceAccount': response.get(u'serviceAccount'), + u'autoScaling': VersionAutoscaling(response.get(u'autoScaling', {}), module).from_response(), + u'manualScaling': VersionManualscaling(response.get(u'manualScaling', {}), module).from_response(), + u'predictionClass': response.get(u'predictionClass'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://ml.googleapis.com/v1/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response) + if op_result is None: + return {} + status = navigate_hash(op_result, ['done']) + wait_done = wait_for_completion(status, op_result, module) + raise_if_errors(wait_done, ['error'], module) + return navigate_hash(wait_done, ['response']) + + +def wait_for_completion(status, op_result, module): + op_id = 
navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while not status: + raise_if_errors(op_result, ['error'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, False) + status = navigate_hash(op_result, ['done']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +# Short names are given (and expected) by the API +# but are returned as full names. +def decode_response(response, module): + if 'name' in response and 'metadata' not in response: + response['name'] = response['name'].split('/')[-1] + return response + + +# Sets this version as default. +def set_default(module): + res = {'project': module.params['project'], 'model': replace_resource_dict(module.params['model'], 'name'), 'name': module.params['name']} + link = "https://ml.googleapis.com/v1/projects/{project}/models/{model}/versions/{name}:setDefault".format(**res) + + auth = GcpSession(module, 'mlengine') + return_if_object(module, auth.post(link)) + + +class VersionAutoscaling(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'minNodes': self.request.get('min_nodes')}) + + def from_response(self): + return remove_nones_from_dict({u'minNodes': self.request.get(u'minNodes')}) + + +class VersionManualscaling(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'nodes': self.request.get('nodes')}) + + def from_response(self): + return remove_nones_from_dict({u'nodes': self.request.get(u'nodes')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_version_info.py 
b/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_version_info.py new file mode 100644 index 000000000..da88e7b96 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_mlengine_version_info.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_mlengine_version_info +description: +- Gather info for GCP Version +short_description: Gather info for GCP Version +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + model: + description: + - The model that this version belongs to. + - 'This field represents a link to a Model resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_mlengine_model task and then set this model field to "{{ name-of-resource + }}"' + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. 
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a version + gcp_mlengine_version_info: + model: "{{ model }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name specified for the version when it was created. + - The version name must be unique within the model it is created in. + returned: success + type: str + description: + description: + - The description specified for the version when it was created. + returned: success + type: str + deploymentUri: + description: + - The Cloud Storage location of the trained model used to create the version. + returned: success + type: str + createTime: + description: + - The time the version was created. + returned: success + type: str + lastUseTime: + description: + - The time the version was last used for prediction. + returned: success + type: str + runtimeVersion: + description: + - The AI Platform runtime version to use for this deployment. + returned: success + type: str + machineType: + description: + - The type of machine on which to serve the model. Currently only applies to + online prediction service. + returned: success + type: str + state: + description: + - The state of a version. + returned: success + type: str + errorMessage: + description: + - The details of a failure or cancellation. + returned: success + type: str + packageUris: + description: + - Cloud Storage paths (gs://…) of packages for custom prediction routines or + scikit-learn pipelines with custom code. + returned: success + type: list + labels: + description: + - One or more labels that you can add, to organize your model versions. 
+ returned: success + type: dict + framework: + description: + - The machine learning framework AI Platform uses to train this version of the + model. + returned: success + type: str + pythonVersion: + description: + - The version of Python used in prediction. If not set, the default version + is '2.7'. Python '3.5' is available when runtimeVersion is set to '1.4' and + above. Python '2.7' works with all supported runtime versions. + returned: success + type: str + serviceAccount: + description: + - Specifies the service account for resource access control. + returned: success + type: str + autoScaling: + description: + - Automatically scale the number of nodes used to serve the model in response + to increases and decreases in traffic. Care should be taken to ramp up traffic + according to the model's ability to scale or you will start seeing increases + in latency and 429 response codes. + returned: success + type: complex + contains: + minNodes: + description: + - The minimum number of nodes to allocate for this mode. + returned: success + type: int + manualScaling: + description: + - Manually select the number of nodes to use for serving the model. You should + generally use autoScaling with an appropriate minNodes instead, but this option + is available if you want more predictable billing. Beware that latency and + error rates will increase if the traffic exceeds that capability of the system + to serve it based on the selected number of nodes. + returned: success + type: complex + contains: + nodes: + description: + - The number of nodes to allocate for this model. These nodes are always + up, starting from the time the model is deployed. + returned: success + type: int + predictionClass: + description: + - The fully qualified name (module_name.class_name) of a class that implements + the Predictor interface described in this reference field. The module containing + this class should be included in a package provided to the packageUris field. 
+ returned: success + type: str + model: + description: + - The model that this version belongs to. + returned: success + type: dict + isDefault: + description: + - If true, this version will be used to handle prediction requests that do not + specify a version. + returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(model=dict(required=True, type='dict'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + res = {'project': module.params['project'], 'model': replace_resource_dict(module.params['model'], 'name')} + return "https://ml.googleapis.com/v1/projects/{project}/models/{model}/versions".format(**res) + + +def fetch_list(module, link): + auth = GcpSession(module, 'mlengine') + return auth.list(link, return_if_object, array_name='versions') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_subscription.py b/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_subscription.py new file mode 100644 index 000000000..08edb64f2 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_subscription.py @@ -0,0 +1,875 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_pubsub_subscription +description: +- A named resource representing the stream of messages from a single, specific topic, + to be delivered to the subscribing application. +short_description: Creates a GCP Subscription +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Name of the subscription. + required: true + type: str + topic: + description: + - A reference to a Topic resource. + - 'This field represents a link to a Topic resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_pubsub_topic task and then set this topic field to "{{ name-of-resource + }}"' + required: true + type: dict + labels: + description: + - A set of key/value label pairs to assign to this Subscription. + required: false + type: dict + push_config: + description: + - If push delivery is used with this subscription, this field is used to configure + it. An empty pushConfig signifies that the subscriber will pull and ack messages + using API methods. 
+ required: false + type: dict + suboptions: + oidc_token: + description: + - If specified, Pub/Sub will generate and attach an OIDC JWT token as an Authorization + header in the HTTP request for every pushed message. + required: false + type: dict + suboptions: + service_account_email: + description: + - Service account email to be used for generating the OIDC token. + - The caller (for subscriptions.create, subscriptions.patch, and subscriptions.modifyPushConfig + RPCs) must have the iam.serviceAccounts.actAs permission for the service + account. + required: true + type: str + audience: + description: + - 'Audience to be used when generating OIDC token. The audience claim + identifies the recipients that the JWT is intended for. The audience + value is a single case-sensitive string. Having multiple values (array) + for the audience field is not supported. More info about the OIDC JWT + token audience here: U(https://tools.ietf.org/html/rfc7519#section-4.1.3) + Note: if not specified, the Push endpoint URL will be used.' + required: false + type: str + push_endpoint: + description: + - A URL locating the endpoint to which messages should be pushed. + - For example, a Webhook endpoint might use "U(https://example.com/push"). + required: true + type: str + attributes: + description: + - Endpoint configuration attributes. + - Every endpoint has a set of API supported attributes that can be used to + control different aspects of the message delivery. + - The currently supported attribute is x-goog-version, which you can use to + change the format of the pushed message. This attribute indicates the version + of the data expected by the endpoint. This controls the shape of the pushed + message (i.e., its fields and metadata). The endpoint version is based on + the version of the Pub/Sub API. + - If not present during the subscriptions.create call, it will default to + the version of the API used to make such call. 
If not present during a subscriptions.modifyPushConfig + call, its value will not be changed. subscriptions.get calls will always + return a valid version, even if the subscription was created without this + attribute. + - 'The possible values for this attribute are: - v1beta1: uses the push format + defined in the v1beta1 Pub/Sub API.' + - "- v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API." + required: false + type: dict + ack_deadline_seconds: + description: + - This value is the maximum time after a subscriber receives a message before + the subscriber should acknowledge the message. After message delivery but before + the ack deadline expires and before the message is acknowledged, it is an outstanding + message and will not be delivered again during that time (on a best-effort basis). + - For pull subscriptions, this value is used as the initial value for the ack + deadline. To override this value for a given message, call subscriptions.modifyAckDeadline + with the corresponding ackId if using pull. The minimum custom deadline you + can specify is 10 seconds. The maximum custom deadline you can specify is 600 + seconds (10 minutes). + - If this parameter is 0, a default value of 10 seconds is used. + - For push delivery, this value is also used to set the request timeout for the + call to the push endpoint. + - If the subscriber never acknowledges the message, the Pub/Sub system will eventually + redeliver the message. + required: false + type: int + message_retention_duration: + description: + - How long to retain unacknowledged messages in the subscription's backlog, from + the moment a message is published. If retainAckedMessages is true, then this + also configures the retention of acknowledged messages, and thus configures + how far back in time a subscriptions.seek can be done. Defaults to 7 days. Cannot + be more than 7 days (`"604800s"`) or less than 10 minutes (`"600s"`). 
+ - 'A duration in seconds with up to nine fractional digits, terminated by ''s''. + Example: `"600.5s"`.' + required: false + default: 604800s + type: str + retain_acked_messages: + description: + - Indicates whether to retain acknowledged messages. If `true`, then messages + are not expunged from the subscription's backlog, even if they are acknowledged, + until they fall out of the messageRetentionDuration window. + required: false + type: bool + expiration_policy: + description: + - A policy that specifies the conditions for this subscription's expiration. + - A subscription is considered active as long as any connected subscriber is successfully + consuming messages from the subscription or is issuing operations on the subscription. + If expirationPolicy is not set, a default policy with ttl of 31 days will be + used. If it is set but ttl is "", the resource never expires. The minimum allowed + value for expirationPolicy.ttl is 1 day. + required: false + type: dict + suboptions: + ttl: + description: + - Specifies the "time-to-live" duration for an associated resource. The resource + expires if it is not active for a period of ttl. + - If ttl is not set, the associated resource never expires. + - A duration in seconds with up to nine fractional digits, terminated by 's'. + - Example - "3.5s". + required: true + type: str + filter: + description: + - The subscription only delivers the messages that match the filter. Pub/Sub automatically + acknowledges the messages that don't match the filter. You can filter messages + by their attributes. The maximum length of a filter is 256 bytes. After creating + the subscription, you can't modify the filter. + required: false + type: str + dead_letter_policy: + description: + - A policy that specifies the conditions for dead lettering messages in this subscription. + If dead_letter_policy is not set, dead lettering is disabled. 
+ - The Cloud Pub/Sub service account associated with this subscription's parent + project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) + must have permission to Acknowledge() messages on this subscription. + required: false + type: dict + suboptions: + dead_letter_topic: + description: + - The name of the topic to which dead letter messages should be published. + - Format is `projects/{project}/topics/{topic}`. + - The Cloud Pub/Sub service account associated with the enclosing subscription's + parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) + must have permission to Publish() to this topic. + - The operation will fail if the topic does not exist. + - Users should ensure that there is a subscription attached to this topic + since messages published to a topic with no subscriptions are lost. + required: false + type: str + max_delivery_attempts: + description: + - The maximum number of delivery attempts for any message. The value must + be between 5 and 100. + - The number of delivery attempts is defined as 1 + (the sum of number of + NACKs and number of times the acknowledgement deadline has been exceeded + for the message). + - A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that client + libraries may automatically extend ack_deadlines. + - This field will be honored on a best effort basis. + - If this parameter is 0, a default value of 5 is used. + required: false + type: int + retry_policy: + description: + - A policy that specifies how Pub/Sub retries message delivery for this subscription. + - If not set, the default retry policy is applied. This generally implies that + messages will be retried as soon as possible for healthy subscribers. RetryPolicy + will be triggered on NACKs or acknowledgement deadline exceeded events for a + given message . 
+ required: false + type: dict + suboptions: + minimum_backoff: + description: + - The minimum delay between consecutive deliveries of a given message. Value + should be between 0 and 600 seconds. Defaults to 10 seconds. + - 'A duration in seconds with up to nine fractional digits, terminated by + ''s''. Example: "3.5s".' + required: false + type: str + maximum_backoff: + description: + - 'The maximum delay between consecutive deliveries of a given message. Value + should be between 0 and 600 seconds. Defaults to 600 seconds. A duration + in seconds with up to nine fractional digits, terminated by ''s''. Example: + "3.5s".' + required: false + type: str + enable_message_ordering: + description: + - If `true`, messages published with the same orderingKey in PubsubMessage will + be delivered to the subscribers in the order in which they are received by the + Pub/Sub system. Otherwise, they may be delivered in any order. + required: false + type: bool + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. 
+ - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions)' +- 'Managing Subscriptions: U(https://cloud.google.com/pubsub/docs/admin#managing_subscriptions)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a topic + google.cloud.gcp_pubsub_topic: + name: topic-subscription + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: topic + +- name: create a subscription + google.cloud.gcp_pubsub_subscription: + name: test_object + topic: "{{ topic }}" + ack_deadline_seconds: 300 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - Name of the subscription. + returned: success + type: str +topic: + description: + - A reference to a Topic resource. + returned: success + type: dict +labels: + description: + - A set of key/value label pairs to assign to this Subscription. + returned: success + type: dict +pushConfig: + description: + - If push delivery is used with this subscription, this field is used to configure + it. 
An empty pushConfig signifies that the subscriber will pull and ack messages + using API methods. + returned: success + type: complex + contains: + oidcToken: + description: + - If specified, Pub/Sub will generate and attach an OIDC JWT token as an Authorization + header in the HTTP request for every pushed message. + returned: success + type: complex + contains: + serviceAccountEmail: + description: + - Service account email to be used for generating the OIDC token. + - The caller (for subscriptions.create, subscriptions.patch, and subscriptions.modifyPushConfig + RPCs) must have the iam.serviceAccounts.actAs permission for the service + account. + returned: success + type: str + audience: + description: + - 'Audience to be used when generating OIDC token. The audience claim identifies + the recipients that the JWT is intended for. The audience value is a single + case-sensitive string. Having multiple values (array) for the audience + field is not supported. More info about the OIDC JWT token audience here: + U(https://tools.ietf.org/html/rfc7519#section-4.1.3) Note: if not specified, + the Push endpoint URL will be used.' + returned: success + type: str + pushEndpoint: + description: + - A URL locating the endpoint to which messages should be pushed. + - For example, a Webhook endpoint might use "U(https://example.com/push"). + returned: success + type: str + attributes: + description: + - Endpoint configuration attributes. + - Every endpoint has a set of API supported attributes that can be used to control + different aspects of the message delivery. + - The currently supported attribute is x-goog-version, which you can use to + change the format of the pushed message. This attribute indicates the version + of the data expected by the endpoint. This controls the shape of the pushed + message (i.e., its fields and metadata). The endpoint version is based on + the version of the Pub/Sub API. 
+ - If not present during the subscriptions.create call, it will default to the + version of the API used to make such call. If not present during a subscriptions.modifyPushConfig + call, its value will not be changed. subscriptions.get calls will always return + a valid version, even if the subscription was created without this attribute. + - 'The possible values for this attribute are: - v1beta1: uses the push format + defined in the v1beta1 Pub/Sub API.' + - "- v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API." + returned: success + type: dict +ackDeadlineSeconds: + description: + - This value is the maximum time after a subscriber receives a message before the + subscriber should acknowledge the message. After message delivery but before the + ack deadline expires and before the message is acknowledged, it is an outstanding + message and will not be delivered again during that time (on a best-effort basis). + - For pull subscriptions, this value is used as the initial value for the ack deadline. + To override this value for a given message, call subscriptions.modifyAckDeadline + with the corresponding ackId if using pull. The minimum custom deadline you can + specify is 10 seconds. The maximum custom deadline you can specify is 600 seconds + (10 minutes). + - If this parameter is 0, a default value of 10 seconds is used. + - For push delivery, this value is also used to set the request timeout for the + call to the push endpoint. + - If the subscriber never acknowledges the message, the Pub/Sub system will eventually + redeliver the message. + returned: success + type: int +messageRetentionDuration: + description: + - How long to retain unacknowledged messages in the subscription's backlog, from + the moment a message is published. If retainAckedMessages is true, then this also + configures the retention of acknowledged messages, and thus configures how far + back in time a subscriptions.seek can be done. Defaults to 7 days. 
Cannot be more + than 7 days (`"604800s"`) or less than 10 minutes (`"600s"`). + - 'A duration in seconds with up to nine fractional digits, terminated by ''s''. + Example: `"600.5s"`.' + returned: success + type: str +retainAckedMessages: + description: + - Indicates whether to retain acknowledged messages. If `true`, then messages are + not expunged from the subscription's backlog, even if they are acknowledged, until + they fall out of the messageRetentionDuration window. + returned: success + type: bool +expirationPolicy: + description: + - A policy that specifies the conditions for this subscription's expiration. + - A subscription is considered active as long as any connected subscriber is successfully + consuming messages from the subscription or is issuing operations on the subscription. + If expirationPolicy is not set, a default policy with ttl of 31 days will be used. + If it is set but ttl is "", the resource never expires. The minimum allowed value + for expirationPolicy.ttl is 1 day. + returned: success + type: complex + contains: + ttl: + description: + - Specifies the "time-to-live" duration for an associated resource. The resource + expires if it is not active for a period of ttl. + - If ttl is not set, the associated resource never expires. + - A duration in seconds with up to nine fractional digits, terminated by 's'. + - Example - "3.5s". + returned: success + type: str +filter: + description: + - The subscription only delivers the messages that match the filter. Pub/Sub automatically + acknowledges the messages that don't match the filter. You can filter messages + by their attributes. The maximum length of a filter is 256 bytes. After creating + the subscription, you can't modify the filter. + returned: success + type: str +deadLetterPolicy: + description: + - A policy that specifies the conditions for dead lettering messages in this subscription. + If dead_letter_policy is not set, dead lettering is disabled. 
+ - The Cloud Pub/Sub service account associated with this subscription's parent project + (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have + permission to Acknowledge() messages on this subscription. + returned: success + type: complex + contains: + deadLetterTopic: + description: + - The name of the topic to which dead letter messages should be published. + - Format is `projects/{project}/topics/{topic}`. + - The Cloud Pub/Sub service account associated with the enclosing subscription's + parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) + must have permission to Publish() to this topic. + - The operation will fail if the topic does not exist. + - Users should ensure that there is a subscription attached to this topic since + messages published to a topic with no subscriptions are lost. + returned: success + type: str + maxDeliveryAttempts: + description: + - The maximum number of delivery attempts for any message. The value must be + between 5 and 100. + - The number of delivery attempts is defined as 1 + (the sum of number of NACKs + and number of times the acknowledgement deadline has been exceeded for the + message). + - A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that client + libraries may automatically extend ack_deadlines. + - This field will be honored on a best effort basis. + - If this parameter is 0, a default value of 5 is used. + returned: success + type: int +retryPolicy: + description: + - A policy that specifies how Pub/Sub retries message delivery for this subscription. + - If not set, the default retry policy is applied. This generally implies that messages + will be retried as soon as possible for healthy subscribers. RetryPolicy will + be triggered on NACKs or acknowledgement deadline exceeded events for a given + message . 
+  returned: success
+  type: complex
+  contains:
+    minimumBackoff:
+      description:
+      - The minimum delay between consecutive deliveries of a given message. Value
+        should be between 0 and 600 seconds. Defaults to 10 seconds.
+      - 'A duration in seconds with up to nine fractional digits, terminated by ''s''.
+        Example: "3.5s".'
+      returned: success
+      type: str
+    maximumBackoff:
+      description:
+      - 'The maximum delay between consecutive deliveries of a given message. Value
+        should be between 0 and 600 seconds. Defaults to 600 seconds. A duration in
+        seconds with up to nine fractional digits, terminated by ''s''. Example: "3.5s".'
+      returned: success
+      type: str
+enableMessageOrdering:
+  description:
+  - If `true`, messages published with the same orderingKey in PubsubMessage will
+    be delivered to the subscribers in the order in which they are received by the
+    Pub/Sub system. Otherwise, they may be delivered in any order.
+  returned: success
+  type: bool
+'''
+
+################################################################################
+# Imports
+################################################################################
+
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
+    navigate_hash,
+    GcpSession,
+    GcpModule,
+    GcpRequest,
+    remove_nones_from_dict,
+    replace_resource_dict,
+)
+import json
+import re
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    """Main function"""
+
+    module = GcpModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            name=dict(required=True, type='str'),
+            topic=dict(required=True, type='dict'),
+            labels=dict(type='dict'),
+            push_config=dict(
+                type='dict',
+                options=dict(
+                    oidc_token=dict(type='dict', options=dict(service_account_email=dict(required=True, type='str'), audience=dict(type='str'))),
+                    push_endpoint=dict(required=True, type='str'),
+                    attributes=dict(type='dict'),
+                ),
+            ),
+            ack_deadline_seconds=dict(type='int'),
+            message_retention_duration=dict(default='604800s', type='str'),
+            retain_acked_messages=dict(type='bool'),
+            expiration_policy=dict(type='dict', options=dict(ttl=dict(required=True, type='str'))),
+            filter=dict(type='str'),
+            dead_letter_policy=dict(type='dict', options=dict(dead_letter_topic=dict(type='str'), max_delivery_attempts=dict(type='int'))),
+            retry_policy=dict(type='dict', options=dict(minimum_backoff=dict(type='str'), maximum_backoff=dict(type='str'))),
+            enable_message_ordering=dict(type='bool'),
+        )
+    )
+
+    # Fall back to the Pub/Sub OAuth scope when the playbook supplies none.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub']
+
+    state = module.params['state']
+
+    # Read the current remote state to decide between create/update/delete.
+    fetch = fetch_resource(module, self_link(module))
+    changed = False
+
+    if fetch:
+        # Resource exists: patch it on drift, or delete it when state=absent.
+        if state == 'present':
+            if is_different(module, fetch):
+                update(module, self_link(module), fetch)
+                fetch = fetch_resource(module, self_link(module))
+                changed = True
+        else:
+            delete(module, self_link(module))
+            fetch = {}
+            changed = True
+    else:
+        # Resource absent: create it when state=present, otherwise no-op.
+        if state == 'present':
+            fetch = create(module, self_link(module))
+            changed = True
+        else:
+            fetch = {}
+
+    fetch.update({'changed': changed})
+
+    module.exit_json(**fetch)
+
+
+def create(module, link):
+    # Pub/Sub creates subscriptions with PUT against the resource's self link
+    # (the caller passes self_link(module) as `link`).
+    auth = GcpSession(module, 'pubsub')
+    return return_if_object(module, auth.put(link, resource_to_request(module)))
+
+
+def update(module, link, fetch):
+    # PATCH only the drifted fields listed in updateMask. 'name' is stripped
+    # from the body (identity fields are not patchable — confirm against the
+    # Pub/Sub subscriptions.patch API).
+    auth = GcpSession(module, 'pubsub')
+    params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))}
+    request = resource_to_request(module)
+    del request['name']
+    return return_if_object(module, auth.patch(link, request, params=params))
+
+
+def updateMask(request, response):
+    # Build the comma-separated updateMask of fields whose desired value
+    # differs from the live resource. 'name', 'topic', 'filter' and
+    # 'enableMessageOrdering' are never masked — presumably immutable after
+    # creation; confirm with the Pub/Sub API reference.
+    update_mask = []
+    if request.get('labels') != response.get('labels'):
+        update_mask.append('labels')
+    if request.get('pushConfig') != response.get('pushConfig'):
+        update_mask.append('pushConfig')
+    if request.get('ackDeadlineSeconds') != response.get('ackDeadlineSeconds'):
+        update_mask.append('ackDeadlineSeconds')
+    if request.get('messageRetentionDuration') != response.get('messageRetentionDuration'):
+        update_mask.append('messageRetentionDuration')
+    if request.get('retainAckedMessages') != response.get('retainAckedMessages'):
+        update_mask.append('retainAckedMessages')
+    if request.get('expirationPolicy') != response.get('expirationPolicy'):
+        update_mask.append('expirationPolicy')
+    if request.get('deadLetterPolicy') != response.get('deadLetterPolicy'):
+        update_mask.append('deadLetterPolicy')
+    if request.get('retryPolicy') != response.get('retryPolicy'):
+        update_mask.append('retryPolicy')
+    return ','.join(update_mask)
+
+
+def delete(module, link):
+    # Delete the subscription at its self link.
+    auth = GcpSession(module, 'pubsub')
+    return return_if_object(module, auth.delete(link))
+
+
+def resource_to_request(module):
+    # Translate Ansible's snake_case parameters into the camelCase API body.
+    request = {
+        u'name': name_pattern(module.params.get('name'), module),
+        u'topic': topic_pattern(replace_resource_dict(module.params.get(u'topic', {}), 'name'), module),
+        u'labels': module.params.get('labels'),
+        u'pushConfig': SubscriptionPushconfig(module.params.get('push_config', {}), module).to_request(),
+        u'ackDeadlineSeconds': module.params.get('ack_deadline_seconds'),
+        u'messageRetentionDuration': module.params.get('message_retention_duration'),
+        u'retainAckedMessages': module.params.get('retain_acked_messages'),
+        u'expirationPolicy': SubscriptionExpirationpolicy(module.params.get('expiration_policy', {}), module).to_request(),
+        u'filter': module.params.get('filter'),
+        u'deadLetterPolicy': SubscriptionDeadletterpolicy(module.params.get('dead_letter_policy', {}), module).to_request(),
+        u'retryPolicy': SubscriptionRetrypolicy(module.params.get('retry_policy', {}), module).to_request(),
+        u'enableMessageOrdering': module.params.get('enable_message_ordering'),
+    }
+    return_vals = {}
+    for k, v in request.items():
+        # Keep truthy values and explicit False booleans; NOTE(review): this
+        # also drops numeric 0 and empty strings — only False is special-cased.
+        if v or v is False:
+            return_vals[k] = v
+
+    return return_vals
+
+
+def fetch_resource(module, link, allow_not_found=True):
+    # GET the resource; returns None when allow_not_found and the API
+    # replies 404 (see return_if_object).
+    auth = GcpSession(module, 'pubsub')
+    return return_if_object(module, auth.get(link), allow_not_found)
+
+
+def self_link(module):
+    # Fully-qualified URL of this subscription resource.
+    return "https://pubsub.googleapis.com/v1/projects/{project}/subscriptions/{name}".format(**module.params)
+
+
+def collection(module):
+    # URL of the project's subscriptions collection.
+    return "https://pubsub.googleapis.com/v1/projects/{project}/subscriptions".format(**module.params)
+
+
+def return_if_object(module, response, allow_not_found=False):
+    # If not found, return nothing.
+    if allow_not_found and response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError):
+        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
+
+    # A 200 body can still carry an error payload; surface it as a failure.
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+def is_different(module, response):
+    # A change is needed when the user-specified fields differ from the live
+    # resource; output-only fields are ignored by intersecting the key sets.
+    request = resource_to_request(module)
+    response = response_to_hash(module, response)
+
+    # Remove all output-only from response.
+    response_vals = {}
+    for k, v in response.items():
+        if k in request:
+            response_vals[k] = v
+
+    request_vals = {}
+    for k, v in request.items():
+        if k in response:
+            request_vals[k] = v
+
+    return GcpRequest(request_vals) != GcpRequest(response_vals)
+
+
+# Remove unnecessary properties from the response.
+# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response):
+    # Normalize the API response for comparison against the request.
+    # NOTE(review): 'filter' and 'enableMessageOrdering' echo the playbook
+    # params rather than the API response, so drift in these fields is never
+    # detected — presumably because they cannot be updated in place; confirm.
+    return {
+        u'name': name_pattern(module.params.get('name'), module),
+        u'topic': topic_pattern(replace_resource_dict(module.params.get(u'topic', {}), 'name'), module),
+        u'labels': response.get(u'labels'),
+        u'pushConfig': SubscriptionPushconfig(response.get(u'pushConfig', {}), module).from_response(),
+        u'ackDeadlineSeconds': response.get(u'ackDeadlineSeconds'),
+        u'messageRetentionDuration': response.get(u'messageRetentionDuration'),
+        u'retainAckedMessages': response.get(u'retainAckedMessages'),
+        u'expirationPolicy': SubscriptionExpirationpolicy(response.get(u'expirationPolicy', {}), module).from_response(),
+        u'filter': module.params.get('filter'),
+        u'deadLetterPolicy': SubscriptionDeadletterpolicy(response.get(u'deadLetterPolicy', {}), module).from_response(),
+        u'retryPolicy': SubscriptionRetrypolicy(response.get(u'retryPolicy', {}), module).from_response(),
+        u'enableMessageOrdering': module.params.get('enable_message_ordering'),
+    }
+
+
+def name_pattern(name, module):
+    # Expand a bare subscription name to the fully-qualified
+    # projects/{project}/subscriptions/{name} form; values that already match
+    # the pattern pass through unchanged.
+    if name is None:
+        return
+
+    regex = r"projects/.*/subscriptions/.*"
+
+    if not re.match(regex, name):
+        name = "projects/{project}/subscriptions/{name}".format(**module.params)
+
+    return name
+
+
+def topic_pattern(name, module):
+    # Expand a bare topic name to the fully-qualified
+    # projects/{project}/topics/{topic} form; values that already match the
+    # pattern pass through unchanged.
+    if name is None:
+        return
+
+    regex = r"projects/.*/topics/.*"
+
+    if not re.match(regex, name):
+        formatted_params = {
+            'project': module.params['project'],
+            'topic': replace_resource_dict(module.params['topic'], 'name'),
+        }
+        name = "projects/{project}/topics/{topic}".format(**formatted_params)
+
+    return name
+
+
+# Maps the push_config sub-object between Ansible (snake_case) and the
+# Pub/Sub API (camelCase); None-valued keys are stripped.
+class SubscriptionPushconfig(object):
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict(
+            {
+                u'oidcToken': SubscriptionOidctoken(self.request.get('oidc_token', {}), self.module).to_request(),
+                u'pushEndpoint': self.request.get('push_endpoint'),
+                u'attributes': self.request.get('attributes'),
+            }
+        )
+
+    def from_response(self):
+        return remove_nones_from_dict(
+            {
+                u'oidcToken': SubscriptionOidctoken(self.request.get(u'oidcToken', {}), self.module).from_response(),
+                u'pushEndpoint': self.request.get(u'pushEndpoint'),
+                u'attributes': self.request.get(u'attributes'),
+            }
+        )
+
+
+# Maps the push_config.oidc_token sub-object between Ansible and the API.
+class SubscriptionOidctoken(object):
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict({u'serviceAccountEmail': self.request.get('service_account_email'), u'audience': self.request.get('audience')})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'serviceAccountEmail': self.request.get(u'serviceAccountEmail'), u'audience': self.request.get(u'audience')})
+
+
+# Maps the expiration_policy sub-object between Ansible and the API.
+class SubscriptionExpirationpolicy(object):
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict({u'ttl': self.request.get('ttl')})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'ttl': self.request.get(u'ttl')})
+
+
+# Maps the dead_letter_policy sub-object between Ansible and the API.
+class SubscriptionDeadletterpolicy(object):
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict(
+            {u'deadLetterTopic': self.request.get('dead_letter_topic'), u'maxDeliveryAttempts': self.request.get('max_delivery_attempts')}
+        )
+
+    def from_response(self):
+        return remove_nones_from_dict(
+            {u'deadLetterTopic': self.request.get(u'deadLetterTopic'), u'maxDeliveryAttempts': self.request.get(u'maxDeliveryAttempts')}
+        )
+
+
+# Maps the retry_policy sub-object between Ansible and the API.
+class SubscriptionRetrypolicy(object):
+    def __init__(self, request, module):
+        self.module = module
+        if request:
+            self.request = request
+        else:
+            self.request = {}
+
+    def to_request(self):
+        return remove_nones_from_dict({u'minimumBackoff': self.request.get('minimum_backoff'), u'maximumBackoff': self.request.get('maximum_backoff')})
+
+    def from_response(self):
+        return remove_nones_from_dict({u'minimumBackoff': self.request.get(u'minimumBackoff'), u'maximumBackoff': self.request.get(u'maximumBackoff')})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_subscription_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_subscription_info.py
new file mode 100644
index 000000000..ee5cf64d1
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_subscription_info.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_pubsub_subscription_info
+description:
+- Gather info for GCP Subscription
+short_description: Gather info for GCP Subscription
+author: Google Inc.
(@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: get info on a subscription + gcp_pubsub_subscription_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - Name of the subscription. + returned: success + type: str + topic: + description: + - A reference to a Topic resource. + returned: success + type: dict + labels: + description: + - A set of key/value label pairs to assign to this Subscription. + returned: success + type: dict + pushConfig: + description: + - If push delivery is used with this subscription, this field is used to configure + it. An empty pushConfig signifies that the subscriber will pull and ack messages + using API methods. + returned: success + type: complex + contains: + oidcToken: + description: + - If specified, Pub/Sub will generate and attach an OIDC JWT token as an + Authorization header in the HTTP request for every pushed message. + returned: success + type: complex + contains: + serviceAccountEmail: + description: + - Service account email to be used for generating the OIDC token. + - The caller (for subscriptions.create, subscriptions.patch, and subscriptions.modifyPushConfig + RPCs) must have the iam.serviceAccounts.actAs permission for the service + account. + returned: success + type: str + audience: + description: + - 'Audience to be used when generating OIDC token. The audience claim + identifies the recipients that the JWT is intended for. The audience + value is a single case-sensitive string. Having multiple values (array) + for the audience field is not supported. More info about the OIDC + JWT token audience here: U(https://tools.ietf.org/html/rfc7519#section-4.1.3) + Note: if not specified, the Push endpoint URL will be used.' + returned: success + type: str + pushEndpoint: + description: + - A URL locating the endpoint to which messages should be pushed. 
+ - For example, a Webhook endpoint might use "U(https://example.com/push"). + returned: success + type: str + attributes: + description: + - Endpoint configuration attributes. + - Every endpoint has a set of API supported attributes that can be used + to control different aspects of the message delivery. + - The currently supported attribute is x-goog-version, which you can use + to change the format of the pushed message. This attribute indicates the + version of the data expected by the endpoint. This controls the shape + of the pushed message (i.e., its fields and metadata). The endpoint version + is based on the version of the Pub/Sub API. + - If not present during the subscriptions.create call, it will default to + the version of the API used to make such call. If not present during a + subscriptions.modifyPushConfig call, its value will not be changed. subscriptions.get + calls will always return a valid version, even if the subscription was + created without this attribute. + - 'The possible values for this attribute are: - v1beta1: uses the push + format defined in the v1beta1 Pub/Sub API.' + - "- v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API." + returned: success + type: dict + ackDeadlineSeconds: + description: + - This value is the maximum time after a subscriber receives a message before + the subscriber should acknowledge the message. After message delivery but + before the ack deadline expires and before the message is acknowledged, it + is an outstanding message and will not be delivered again during that time + (on a best-effort basis). + - For pull subscriptions, this value is used as the initial value for the ack + deadline. To override this value for a given message, call subscriptions.modifyAckDeadline + with the corresponding ackId if using pull. The minimum custom deadline you + can specify is 10 seconds. The maximum custom deadline you can specify is + 600 seconds (10 minutes). 
+ - If this parameter is 0, a default value of 10 seconds is used. + - For push delivery, this value is also used to set the request timeout for + the call to the push endpoint. + - If the subscriber never acknowledges the message, the Pub/Sub system will + eventually redeliver the message. + returned: success + type: int + messageRetentionDuration: + description: + - How long to retain unacknowledged messages in the subscription's backlog, + from the moment a message is published. If retainAckedMessages is true, then + this also configures the retention of acknowledged messages, and thus configures + how far back in time a subscriptions.seek can be done. Defaults to 7 days. + Cannot be more than 7 days (`"604800s"`) or less than 10 minutes (`"600s"`). + - 'A duration in seconds with up to nine fractional digits, terminated by ''s''. + Example: `"600.5s"`.' + returned: success + type: str + retainAckedMessages: + description: + - Indicates whether to retain acknowledged messages. If `true`, then messages + are not expunged from the subscription's backlog, even if they are acknowledged, + until they fall out of the messageRetentionDuration window. + returned: success + type: bool + expirationPolicy: + description: + - A policy that specifies the conditions for this subscription's expiration. + - A subscription is considered active as long as any connected subscriber is + successfully consuming messages from the subscription or is issuing operations + on the subscription. If expirationPolicy is not set, a default policy with + ttl of 31 days will be used. If it is set but ttl is "", the resource never + expires. The minimum allowed value for expirationPolicy.ttl is 1 day. + returned: success + type: complex + contains: + ttl: + description: + - Specifies the "time-to-live" duration for an associated resource. The + resource expires if it is not active for a period of ttl. + - If ttl is not set, the associated resource never expires. 
+ - A duration in seconds with up to nine fractional digits, terminated by + 's'. + - Example - "3.5s". + returned: success + type: str + filter: + description: + - The subscription only delivers the messages that match the filter. Pub/Sub + automatically acknowledges the messages that don't match the filter. You can + filter messages by their attributes. The maximum length of a filter is 256 + bytes. After creating the subscription, you can't modify the filter. + returned: success + type: str + deadLetterPolicy: + description: + - A policy that specifies the conditions for dead lettering messages in this + subscription. If dead_letter_policy is not set, dead lettering is disabled. + - The Cloud Pub/Sub service account associated with this subscription's parent + project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) + must have permission to Acknowledge() messages on this subscription. + returned: success + type: complex + contains: + deadLetterTopic: + description: + - The name of the topic to which dead letter messages should be published. + - Format is `projects/{project}/topics/{topic}`. + - The Cloud Pub/Sub service account associated with the enclosing subscription's + parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) + must have permission to Publish() to this topic. + - The operation will fail if the topic does not exist. + - Users should ensure that there is a subscription attached to this topic + since messages published to a topic with no subscriptions are lost. + returned: success + type: str + maxDeliveryAttempts: + description: + - The maximum number of delivery attempts for any message. The value must + be between 5 and 100. + - The number of delivery attempts is defined as 1 + (the sum of number of + NACKs and number of times the acknowledgement deadline has been exceeded + for the message). + - A NACK is any call to ModifyAckDeadline with a 0 deadline. 
Note that client + libraries may automatically extend ack_deadlines. + - This field will be honored on a best effort basis. + - If this parameter is 0, a default value of 5 is used. + returned: success + type: int + retryPolicy: + description: + - A policy that specifies how Pub/Sub retries message delivery for this subscription. + - If not set, the default retry policy is applied. This generally implies that + messages will be retried as soon as possible for healthy subscribers. RetryPolicy + will be triggered on NACKs or acknowledgement deadline exceeded events for + a given message . + returned: success + type: complex + contains: + minimumBackoff: + description: + - The minimum delay between consecutive deliveries of a given message. Value + should be between 0 and 600 seconds. Defaults to 10 seconds. + - 'A duration in seconds with up to nine fractional digits, terminated by + ''s''. Example: "3.5s".' + returned: success + type: str + maximumBackoff: + description: + - 'The maximum delay between consecutive deliveries of a given message. + Value should be between 0 and 600 seconds. Defaults to 600 seconds. A + duration in seconds with up to nine fractional digits, terminated by ''s''. + Example: "3.5s".' + returned: success + type: str + enableMessageOrdering: + description: + - If `true`, messages published with the same orderingKey in PubsubMessage will + be delivered to the subscribers in the order in which they are received by + the Pub/Sub system. Otherwise, they may be delivered in any order. 
+      returned: success
+      type: bool
+'''
+
+################################################################################
+# Imports
+################################################################################
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
+import json
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    # Info module: list every subscription in the project and return the
+    # collection under the 'resources' key.
+    module = GcpModule(argument_spec=dict())
+
+    # Fall back to the Pub/Sub OAuth scope when the playbook supplies none.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub']
+
+    return_value = {'resources': fetch_list(module, collection(module))}
+    module.exit_json(**return_value)
+
+
+def collection(module):
+    # URL of the project's subscriptions collection.
+    return "https://pubsub.googleapis.com/v1/projects/{project}/subscriptions".format(**module.params)
+
+
+def fetch_list(module, link):
+    # auth.list collects the 'subscriptions' array from the response,
+    # presumably following pagination — see GcpSession.list in gcp_utils.
+    auth = GcpSession(module, 'pubsub')
+    return auth.list(link, return_if_object, array_name='subscriptions')
+
+
+def return_if_object(module, response):
+    # If not found, return nothing.
+    if response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    # A 200 body can still carry an error payload; surface it as a failure.
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_topic.py b/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_topic.py
new file mode 100644
index 000000000..673df4967
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_topic.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_pubsub_topic
+description:
+- A named resource to which messages are sent by publishers.
+short_description: Creates a GCP Topic +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Name of the topic. + required: true + type: str + kms_key_name: + description: + - The resource name of the Cloud KMS CryptoKey to be used to protect access to + messages published on this topic. Your project's PubSub service account (`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`) + must have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + - The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*` . + required: false + type: str + labels: + description: + - A set of key/value label pairs to assign to this Topic. + required: false + type: dict + message_storage_policy: + description: + - Policy constraining the set of Google Cloud Platform regions where messages + published to the topic may be stored. If not present, then no constraints are + in effect. + required: false + type: dict + suboptions: + allowed_persistence_regions: + description: + - A list of IDs of GCP regions where messages that are published to the topic + may be persisted in storage. Messages published by publishers running in + non-allowed GCP regions (or running outside of GCP altogether) will be routed + for storage in one of the allowed regions. An empty list means that no regions + are allowed, and is not a valid configuration. + elements: str + required: true + type: list + schema_settings: + description: + - Settings for validating messages published against a schema. + required: false + type: dict + suboptions: + schema: + description: + - The name of the schema that messages published should be validated against. + Format is projects/{project}/schemas/{schema}. 
+ - The value of this field will be _deleted-schema_ if the schema has been + deleted. + required: true + type: str + encoding: + description: + - The encoding of messages validated against schema. + - 'Some valid choices include: "ENCODING_UNSPECIFIED", "JSON", "BINARY"' + required: false + default: ENCODING_UNSPECIFIED + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics)' +- 'Managing Topics: U(https://cloud.google.com/pubsub/docs/admin#managing_topics)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. 
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a topic + google.cloud.gcp_pubsub_topic: + name: test-topic1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - Name of the topic. + returned: success + type: str +kmsKeyName: + description: + - The resource name of the Cloud KMS CryptoKey to be used to protect access to messages + published on this topic. Your project's PubSub service account (`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`) + must have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + - The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*` . + returned: success + type: str +labels: + description: + - A set of key/value label pairs to assign to this Topic. + returned: success + type: dict +messageStoragePolicy: + description: + - Policy constraining the set of Google Cloud Platform regions where messages published + to the topic may be stored. If not present, then no constraints are in effect. + returned: success + type: complex + contains: + allowedPersistenceRegions: + description: + - A list of IDs of GCP regions where messages that are published to the topic + may be persisted in storage. Messages published by publishers running in non-allowed + GCP regions (or running outside of GCP altogether) will be routed for storage + in one of the allowed regions. An empty list means that no regions are allowed, + and is not a valid configuration. 
+      returned: success
+      type: list
+schemaSettings:
+  description:
+  - Settings for validating messages published against a schema.
+  returned: success
+  type: complex
+  contains:
+    schema:
+      description:
+      - The name of the schema that messages published should be validated against.
+        Format is projects/{project}/schemas/{schema}.
+      - The value of this field will be _deleted-schema_ if the schema has been deleted.
+      returned: success
+      type: str
+    encoding:
+      description:
+      - The encoding of messages validated against schema.
+      returned: success
+      type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
+    navigate_hash,
+    GcpSession,
+    GcpModule,
+    GcpRequest,
+    remove_nones_from_dict,
+    replace_resource_dict,
+)
+import json
+import re
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    """Main function"""
+
+    module = GcpModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            name=dict(required=True, type='str'),
+            kms_key_name=dict(type='str'),
+            labels=dict(type='dict'),
+            message_storage_policy=dict(type='dict', options=dict(allowed_persistence_regions=dict(required=True, type='list', elements='str'))),
+            schema_settings=dict(type='dict', options=dict(schema=dict(required=True, type='str'), encoding=dict(default='ENCODING_UNSPECIFIED', type='str'))),
+        )
+    )
+
+    # Fall back to the Pub/Sub OAuth scope when the playbook supplies none.
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub']
+
+    state = module.params['state']
+
+    # Read the current remote state to decide between create/update/delete.
+    fetch = fetch_resource(module, self_link(module))
+    changed = False
+
+    if fetch:
+        # Resource exists: patch it on drift, or delete it when state=absent.
+        if state == 'present':
+            if is_different(module, fetch):
+                update(module, self_link(module), fetch)
+                fetch = fetch_resource(module, self_link(module))
+                changed = True
+        else:
+            delete(module, self_link(module))
+            fetch = {}
+            changed = True
+    else:
+        # Resource absent: create it when state=present, otherwise no-op.
+        if state == 'present':
+            fetch = create(module, self_link(module))
+            changed = True
+        else:
+            fetch = {}
+
+    fetch.update({'changed': changed})
+
+    module.exit_json(**fetch)
+
+
+def create(module, link):
+    # Pub/Sub creates topics with PUT against the resource's self link.
+    auth = GcpSession(module, 'pubsub')
+    return return_if_object(module, auth.put(link, resource_to_request(module)))
+
+
+def update(module, link, fetch):
+    # PATCH only the drifted fields listed in updateMask. 'name' is stripped
+    # from the body (identity fields are not patchable — confirm against the
+    # Pub/Sub topics.patch API).
+    auth = GcpSession(module, 'pubsub')
+    params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))}
+    request = resource_to_request(module)
+    del request['name']
+    return return_if_object(module, auth.patch(link, request, params=params))
+
+
+def updateMask(request, response):
+    # Build the comma-separated updateMask of fields whose desired value
+    # differs from the live resource.
+    update_mask = []
+    if request.get('kmsKeyName') != response.get('kmsKeyName'):
+        update_mask.append('kmsKeyName')
+    if request.get('labels') != response.get('labels'):
+        update_mask.append('labels')
+    if request.get('messageStoragePolicy') != response.get('messageStoragePolicy'):
+        update_mask.append('messageStoragePolicy')
+    if request.get('schemaSettings') != response.get('schemaSettings'):
+        update_mask.append('schemaSettings')
+    return ','.join(update_mask)
+
+
+def delete(module, link):
+    # Delete the topic at its self link.
+    auth = GcpSession(module, 'pubsub')
+    return return_if_object(module, auth.delete(link))
+
+
+def resource_to_request(module):
+    # Translate Ansible's snake_case parameters into the camelCase API body.
+    request = {
+        u'name': name_pattern(module.params.get('name'), module),
+        u'kmsKeyName': module.params.get('kms_key_name'),
+        u'labels': module.params.get('labels'),
+        u'messageStoragePolicy': TopicMessagestoragepolicy(module.params.get('message_storage_policy', {}), module).to_request(),
+        u'schemaSettings': TopicSchemasettings(module.params.get('schema_settings', {}), module).to_request(),
+    }
+    return_vals = {}
+    for k, v in request.items():
+        # Keep truthy values and explicit False booleans; NOTE(review): this
+        # also drops numeric 0 and empty strings — only False is special-cased.
+        if v or v is False:
+            return_vals[k] = v
+
+    return return_vals


def fetch_resource(module, link, allow_not_found=True):
    """Fetch the topic at *link*; returns None when it does not exist."""
    session = GcpSession(module, 'pubsub')
    return return_if_object(module, session.get(link), allow_not_found)


def self_link(module):
    """URL identifying this specific topic resource."""
    return "https://pubsub.googleapis.com/v1/projects/{project}/topics/{name}".format(**module.params)


def collection(module):
    """URL of the topics collection for the configured project."""
    return "https://pubsub.googleapis.com/v1/projects/{project}/topics".format(**module.params)


def return_if_object(module, response, allow_not_found=False):
    """Decode *response* as JSON, failing the module on HTTP or API errors.

    Returns None for a tolerated 404 and for an empty 204 reply.
    """
    if allow_not_found and response.status_code == 404:
        # A missing resource is an expected outcome here, not an error.
        return None

    if response.status_code == 204:
        # No body to decode.
        return None

    try:
        module.raise_for_status(response)
        payload = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    api_errors = navigate_hash(payload, ['error', 'errors'])
    if api_errors:
        module.fail_json(msg=api_errors)

    return payload


def is_different(module, response):
    """Compare desired state (module params) against the API's reported state.

    Only keys known to both sides are compared, so output-only fields are
    ignored.
    """
    request = resource_to_request(module)
    reported = response_to_hash(module, response)

    reported_subset = {k: v for k, v in reported.items() if k in request}
    request_subset = {k: v for k, v in request.items() if k in reported}

    return GcpRequest(request_subset) != GcpRequest(reported_subset)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response): + return { + u'name': name_pattern(module.params.get('name'), module), + u'kmsKeyName': response.get(u'kmsKeyName'), + u'labels': response.get(u'labels'), + u'messageStoragePolicy': TopicMessagestoragepolicy(response.get(u'messageStoragePolicy', {}), module).from_response(), + u'schemaSettings': TopicSchemasettings(response.get(u'schemaSettings', {}), module).from_response(), + } + + +def name_pattern(name, module): + if name is None: + return + + regex = r"projects/.*/topics/.*" + + if not re.match(regex, name): + name = "projects/{project}/topics/{name}".format(**module.params) + + return name + + +class TopicMessagestoragepolicy(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'allowedPersistenceRegions': self.request.get('allowed_persistence_regions')}) + + def from_response(self): + return remove_nones_from_dict({u'allowedPersistenceRegions': self.request.get(u'allowedPersistenceRegions')}) + + +class TopicSchemasettings(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'schema': self.request.get('schema'), u'encoding': self.request.get('encoding')}) + + def from_response(self): + return remove_nones_from_dict({u'schema': self.request.get(u'schema'), u'encoding': self.request.get(u'encoding')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_topic_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_topic_info.py new file mode 100644 index 000000000..fa194013c --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_pubsub_topic_info.py @@ -0,0 +1,216 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# 
GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_pubsub_topic_info +description: +- Gather info for GCP Topic +short_description: Gather info for GCP Topic +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a topic + gcp_pubsub_topic_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - Name of the topic. + returned: success + type: str + kmsKeyName: + description: + - The resource name of the Cloud KMS CryptoKey to be used to protect access + to messages published on this topic. Your project's PubSub service account + (`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`) must + have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + - The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*` . 
+ returned: success + type: str + labels: + description: + - A set of key/value label pairs to assign to this Topic. + returned: success + type: dict + messageStoragePolicy: + description: + - Policy constraining the set of Google Cloud Platform regions where messages + published to the topic may be stored. If not present, then no constraints + are in effect. + returned: success + type: complex + contains: + allowedPersistenceRegions: + description: + - A list of IDs of GCP regions where messages that are published to the + topic may be persisted in storage. Messages published by publishers running + in non-allowed GCP regions (or running outside of GCP altogether) will + be routed for storage in one of the allowed regions. An empty list means + that no regions are allowed, and is not a valid configuration. + returned: success + type: list + schemaSettings: + description: + - Settings for validating messages published against a schema. + returned: success + type: complex + contains: + schema: + description: + - The name of the schema that messages published should be validated against. + Format is projects/{project}/schemas/{schema}. + - The value of this field will be _deleted-schema_ if the schema has been + deleted. + returned: success + type: str + encoding: + description: + - The encoding of messages validated against schema. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://pubsub.googleapis.com/v1/projects/{project}/topics".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'pubsub') + return auth.list(link, return_if_object, array_name='topics') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_redis_instance.py b/ansible_collections/google/cloud/plugins/modules/gcp_redis_instance.py new file mode 100644 index 000000000..fe817c23e --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_redis_instance.py @@ -0,0 +1,675 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_redis_instance +description: +- A Google Cloud Redis instance. 
+short_description: Creates a GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + alternative_location_id: + description: + - Only applicable to STANDARD_HA tier which protects the instance against zonal + failures by provisioning it across two zones. + - If provided, it must be a different zone from the one provided in [locationId]. + required: false + type: str + auth_enabled: + description: + - Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set + to "true" AUTH is enabled on the instance. + - Default value is "false" meaning AUTH is disabled. + required: false + default: 'false' + type: bool + authorized_network: + description: + - The full name of the Google Compute Engine network to which the instance is + connected. If left unspecified, the default network will be used. + required: false + type: str + connect_mode: + description: + - The connection mode of the Redis instance. + - 'Some valid choices include: "DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"' + required: false + default: DIRECT_PEERING + type: str + display_name: + description: + - An arbitrary and optional user-provided name for the instance. + required: false + type: str + labels: + description: + - Resource labels to represent user provided metadata. + required: false + type: dict + redis_configs: + description: + - Redis configuration parameters, according to U(http://redis.io/topics/config). + - 'Please check Memorystore documentation for the list of supported parameters: + U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs) + .' + required: false + type: dict + location_id: + description: + - The zone where the instance will be provisioned. 
If not provided, the service + will choose a zone for the instance. For STANDARD_HA tier, instances will be + created across two zones for protection against zonal failures. If [alternativeLocationId] + is also provided, it must be different from [locationId]. + required: false + type: str + name: + description: + - The ID of the instance or a fully qualified identifier for the instance. + required: true + type: str + memory_size_gb: + description: + - Redis memory size in GiB. + required: true + type: int + redis_version: + description: + - The version of Redis software. If not provided, latest supported version will + be used. Please check the API documentation linked at the top for the latest + valid values. + required: false + type: str + reserved_ip_range: + description: + - The CIDR range of internal addresses that are reserved for this instance. If + not provided, the service will choose an unused /29 block, for example, 10.0.0.0/29 + or 192.168.0.0/29. Ranges must be unique and non-overlapping with existing subnets + in an authorized network. + required: false + type: str + tier: + description: + - 'The service tier of the instance. Must be one of these values: - BASIC: standalone + instance - STANDARD_HA: highly available primary/replica instances .' + - 'Some valid choices include: "BASIC", "STANDARD_HA"' + required: false + default: BASIC + type: str + transit_encryption_mode: + description: + - The TLS mode of the Redis instance, If not provided, TLS is disabled for the + instance. + - "- SERVER_AUTHENTICATION: Client to Server traffic encryption enabled with server + authentcation ." + - 'Some valid choices include: "SERVER_AUTHENTICATION", "DISABLED"' + required: false + default: DISABLED + type: str + region: + description: + - The name of the Redis region of the instance. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances)' +- 'Official Documentation: U(https://cloud.google.com/memorystore/docs/redis/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a network + google.cloud.gcp_compute_network: + name: network-instance + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: network + +- name: create a instance + google.cloud.gcp_redis_instance: + name: instance37 + tier: STANDARD_HA + memory_size_gb: 1 + region: us-central1 + location_id: us-central1-a + redis_version: REDIS_3_2 + display_name: Ansible Test Instance + reserved_ip_range: 192.168.0.0/29 + labels: + my_key: my_val + other_key: other_val + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +alternativeLocationId: + description: + - Only applicable to STANDARD_HA tier which protects the instance against zonal + failures by provisioning it across two zones. + - If provided, it must be a different zone from the one provided in [locationId]. + returned: success + type: str +authEnabled: + description: + - Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set + to "true" AUTH is enabled on the instance. + - Default value is "false" meaning AUTH is disabled. + returned: success + type: bool +authorizedNetwork: + description: + - The full name of the Google Compute Engine network to which the instance is connected. + If left unspecified, the default network will be used. + returned: success + type: str +connectMode: + description: + - The connection mode of the Redis instance. + returned: success + type: str +createTime: + description: + - The time the instance was created in RFC3339 UTC "Zulu" format, accurate to nanoseconds. + returned: success + type: str +currentLocationId: + description: + - The current zone where the Redis endpoint is placed. + - For Basic Tier instances, this will always be the same as the [locationId] provided + by the user at creation time. 
For Standard Tier instances, this can be either + [locationId] or [alternativeLocationId] and can change after a failover event. + returned: success + type: str +displayName: + description: + - An arbitrary and optional user-provided name for the instance. + returned: success + type: str +host: + description: + - Hostname or IP address of the exposed Redis endpoint used by clients to connect + to the service. + returned: success + type: str +labels: + description: + - Resource labels to represent user provided metadata. + returned: success + type: dict +redisConfigs: + description: + - Redis configuration parameters, according to U(http://redis.io/topics/config). + - 'Please check Memorystore documentation for the list of supported parameters: + U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs) + .' + returned: success + type: dict +locationId: + description: + - The zone where the instance will be provisioned. If not provided, the service + will choose a zone for the instance. For STANDARD_HA tier, instances will be created + across two zones for protection against zonal failures. If [alternativeLocationId] + is also provided, it must be different from [locationId]. + returned: success + type: str +name: + description: + - The ID of the instance or a fully qualified identifier for the instance. + returned: success + type: str +memorySizeGb: + description: + - Redis memory size in GiB. + returned: success + type: int +port: + description: + - The port number of the exposed Redis endpoint. + returned: success + type: int +persistenceIamIdentity: + description: + - Output only. Cloud IAM identity used by import / export operations to transfer + data to/from Cloud Storage. Format is "serviceAccount:". + - The value may change over time for a given instance so should be checked before + each import/export operation. 
+ returned: success + type: str +redisVersion: + description: + - The version of Redis software. If not provided, latest supported version will + be used. Please check the API documentation linked at the top for the latest valid + values. + returned: success + type: str +reservedIpRange: + description: + - The CIDR range of internal addresses that are reserved for this instance. If not + provided, the service will choose an unused /29 block, for example, 10.0.0.0/29 + or 192.168.0.0/29. Ranges must be unique and non-overlapping with existing subnets + in an authorized network. + returned: success + type: str +tier: + description: + - 'The service tier of the instance. Must be one of these values: - BASIC: standalone + instance - STANDARD_HA: highly available primary/replica instances .' + returned: success + type: str +transitEncryptionMode: + description: + - The TLS mode of the Redis instance, If not provided, TLS is disabled for the instance. + - "- SERVER_AUTHENTICATION: Client to Server traffic encryption enabled with server + authentcation ." + returned: success + type: str +serverCaCerts: + description: + - List of server CA certificates for the instance. + returned: success + type: complex + contains: + serialNumber: + description: + - Serial number, as extracted from the certificate. + returned: success + type: str + cert: + description: + - Serial number, as extracted from the certificate. + returned: success + type: str + createTime: + description: + - The time when the certificate was created. + returned: success + type: str + expireTime: + description: + - The time when the certificate expires. + returned: success + type: str + sha1Fingerprint: + description: + - Sha1 Fingerprint of the certificate. + returned: success + type: str +region: + description: + - The name of the Redis region of the instance. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + alternative_location_id=dict(type='str'), + auth_enabled=dict(type='bool'), + authorized_network=dict(type='str'), + connect_mode=dict(default='DIRECT_PEERING', type='str'), + display_name=dict(type='str'), + labels=dict(type='dict'), + redis_configs=dict(type='dict'), + location_id=dict(type='str'), + name=dict(required=True, type='str'), + memory_size_gb=dict(required=True, type='int'), + redis_version=dict(type='str'), + reserved_ip_range=dict(type='str'), + tier=dict(default='BASIC', type='str'), + transit_encryption_mode=dict(default='DISABLED', type='str'), + region=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, create_link(module)) + changed = True + else: + fetch = {} + + 
fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'redis') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, fetch): + auth = GcpSession(module, 'redis') + params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))} + request = resource_to_request(module) + del request['name'] + return wait_for_operation(module, auth.patch(link, request, params=params)) + + +def updateMask(request, response): + update_mask = [] + if request.get('authEnabled') != response.get('authEnabled'): + update_mask.append('authEnabled') + if request.get('displayName') != response.get('displayName'): + update_mask.append('displayName') + if request.get('labels') != response.get('labels'): + update_mask.append('labels') + if request.get('redisConfigs') != response.get('redisConfigs'): + update_mask.append('redisConfigs') + if request.get('memorySizeGb') != response.get('memorySizeGb'): + update_mask.append('memorySizeGb') + if request.get('redisVersion') != response.get('redisVersion'): + update_mask.append('redisVersion') + return ','.join(update_mask) + + +def delete(module, link): + auth = GcpSession(module, 'redis') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'alternativeLocationId': module.params.get('alternative_location_id'), + u'authEnabled': module.params.get('auth_enabled'), + u'authorizedNetwork': module.params.get('authorized_network'), + u'connectMode': module.params.get('connect_mode'), + u'displayName': module.params.get('display_name'), + u'labels': module.params.get('labels'), + u'redisConfigs': module.params.get('redis_configs'), + u'locationId': module.params.get('location_id'), + u'name': module.params.get('name'), + u'memorySizeGb': module.params.get('memory_size_gb'), + u'redisVersion': module.params.get('redis_version'), + 
u'reservedIpRange': module.params.get('reserved_ip_range'), + u'tier': module.params.get('tier'), + u'transitEncryptionMode': module.params.get('transit_encryption_mode'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'redis') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances/{name}".format(**module.params) + + +def collection(module): + return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances".format(**module.params) + + +def create_link(module): + return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances?instanceId={name}".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'alternativeLocationId': module.params.get('alternative_location_id'), + u'authEnabled': response.get(u'authEnabled'), + u'authorizedNetwork': module.params.get('authorized_network'), + u'connectMode': module.params.get('connect_mode'), + u'createTime': response.get(u'createTime'), + u'currentLocationId': response.get(u'currentLocationId'), + u'displayName': response.get(u'displayName'), + u'host': response.get(u'host'), + u'labels': response.get(u'labels'), + u'redisConfigs': response.get(u'redisConfigs'), + u'locationId': module.params.get('location_id'), + u'name': module.params.get('name'), + u'memorySizeGb': response.get(u'memorySizeGb'), + u'port': response.get(u'port'), + u'persistenceIamIdentity': response.get(u'persistenceIamIdentity'), + u'redisVersion': response.get(u'redisVersion'), + u'reservedIpRange': module.params.get('reserved_ip_range'), + u'tier': module.params.get('tier'), + u'transitEncryptionMode': module.params.get('transit_encryption_mode'), + u'serverCaCerts': InstanceServercacertsArray(response.get(u'serverCaCerts', []), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://redis.googleapis.com/v1/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response) + if op_result is None: + return {} + status = navigate_hash(op_result, ['done']) + wait_done = wait_for_completion(status, op_result, 
module) + raise_if_errors(wait_done, ['error'], module) + return navigate_hash(wait_done, ['response']) + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while not status: + raise_if_errors(op_result, ['error'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, False) + status = navigate_hash(op_result, ['done']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class InstanceServercacertsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({}) + + def _response_from_item(self, item): + return remove_nones_from_dict({}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_redis_instance_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_redis_instance_info.py new file mode 100644 index 000000000..86b7d1ce4 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_redis_instance_info.py @@ -0,0 +1,327 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by 
Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_redis_instance_info +description: +- Gather info for GCP Instance +short_description: Gather info for GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + region: + description: + - The name of the Redis region of the instance. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. 
+ - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance + gcp_redis_instance_info: + region: us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + alternativeLocationId: + description: + - Only applicable to STANDARD_HA tier which protects the instance against zonal + failures by provisioning it across two zones. + - If provided, it must be a different zone from the one provided in [locationId]. + returned: success + type: str + authEnabled: + description: + - Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If + set to "true" AUTH is enabled on the instance. + - Default value is "false" meaning AUTH is disabled. + returned: success + type: bool + authorizedNetwork: + description: + - The full name of the Google Compute Engine network to which the instance is + connected. If left unspecified, the default network will be used. + returned: success + type: str + connectMode: + description: + - The connection mode of the Redis instance. 
+ returned: success + type: str + createTime: + description: + - The time the instance was created in RFC3339 UTC "Zulu" format, accurate to + nanoseconds. + returned: success + type: str + currentLocationId: + description: + - The current zone where the Redis endpoint is placed. + - For Basic Tier instances, this will always be the same as the [locationId] + provided by the user at creation time. For Standard Tier instances, this can + be either [locationId] or [alternativeLocationId] and can change after a failover + event. + returned: success + type: str + displayName: + description: + - An arbitrary and optional user-provided name for the instance. + returned: success + type: str + host: + description: + - Hostname or IP address of the exposed Redis endpoint used by clients to connect + to the service. + returned: success + type: str + labels: + description: + - Resource labels to represent user provided metadata. + returned: success + type: dict + redisConfigs: + description: + - Redis configuration parameters, according to U(http://redis.io/topics/config). + - 'Please check Memorystore documentation for the list of supported parameters: + U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs) + .' + returned: success + type: dict + locationId: + description: + - The zone where the instance will be provisioned. If not provided, the service + will choose a zone for the instance. For STANDARD_HA tier, instances will + be created across two zones for protection against zonal failures. If [alternativeLocationId] + is also provided, it must be different from [locationId]. + returned: success + type: str + name: + description: + - The ID of the instance or a fully qualified identifier for the instance. + returned: success + type: str + memorySizeGb: + description: + - Redis memory size in GiB. 
+      returned: success
+      type: int
+    port:
+      description:
+      - The port number of the exposed Redis endpoint.
+      returned: success
+      type: int
+    persistenceIamIdentity:
+      description:
+      - Output only. Cloud IAM identity used by import / export operations to transfer
+        data to/from Cloud Storage. Format is "serviceAccount:".
+      - The value may change over time for a given instance so should be checked before
+        each import/export operation.
+      returned: success
+      type: str
+    redisVersion:
+      description:
+      - The version of Redis software. If not provided, latest supported version will
+        be used. Please check the API documentation linked at the top for the latest
+        valid values.
+      returned: success
+      type: str
+    reservedIpRange:
+      description:
+      - The CIDR range of internal addresses that are reserved for this instance.
+        If not provided, the service will choose an unused /29 block, for example,
+        10.0.0.0/29 or 192.168.0.0/29. Ranges must be unique and non-overlapping with
+        existing subnets in an authorized network.
+      returned: success
+      type: str
+    tier:
+      description:
+      - 'The service tier of the instance. Must be one of these values: - BASIC: standalone
+        instance - STANDARD_HA: highly available primary/replica instances .'
+      returned: success
+      type: str
+    transitEncryptionMode:
+      description:
+      - The TLS mode of the Redis instance. If not provided, TLS is disabled for the
+        instance.
+      - "- SERVER_AUTHENTICATION: Client to Server traffic encryption enabled with
+        server authentication ."
+      returned: success
+      type: str
+    serverCaCerts:
+      description:
+      - List of server CA certificates for the instance.
+      returned: success
+      type: complex
+      contains:
+        serialNumber:
+          description:
+          - Serial number, as extracted from the certificate.
+          returned: success
+          type: str
+        cert:
+          description:
+          - PEM representation of the certificate.
+          returned: success
+          type: str
+        createTime:
+          description:
+          - The time when the certificate was created.
+ returned: success + type: str + expireTime: + description: + - The time when the certificate expires. + returned: success + type: str + sha1Fingerprint: + description: + - Sha1 Fingerprint of the certificate. + returned: success + type: str + region: + description: + - The name of the Redis region of the instance. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'redis') + return auth.list(link, return_if_object, array_name='instances') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_resourcemanager_project.py b/ansible_collections/google/cloud/plugins/modules/gcp_resourcemanager_project.py new file mode 100644 index 000000000..045ec6ee3 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_resourcemanager_project.py @@ -0,0 +1,421 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_resourcemanager_project +description: +- Represents a GCP Project. 
A project is a container for ACLs, APIs, App Engine Apps, + VMs, and other Google Cloud Platform resources. +short_description: Creates a GCP Project +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - 'The user-assigned display name of the Project. It must be 4 to 30 characters. + Allowed characters are: lowercase and uppercase letters, numbers, hyphen, single-quote, + double-quote, space, and exclamation point.' + required: false + type: str + labels: + description: + - The labels associated with this Project. + - 'Label keys must be between 1 and 63 characters long and must conform to the + following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.' + - Label values must be between 0 and 63 characters long and must conform to the + regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. + - No more than 256 labels can be associated with a given resource. + - Clients should store labels in a representation such as JSON that does not depend + on specific characters being disallowed . + required: false + type: dict + parent: + description: + - A parent organization. + required: false + type: dict + suboptions: + type: + description: + - Must be organization. + required: false + type: str + id: + description: + - Id of the organization. + required: false + type: str + id: + description: + - The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase letters, + digits, or hyphens. It must start with a letter. + - Trailing hyphens are prohibited. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a project + google.cloud.gcp_resourcemanager_project: + name: My Sample Project + id: ansible-test-{{ 10000000000 | random }} + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + parent: + type: organization + id: 636173955921 + state: present +''' + +RETURN = ''' +number: + description: + - Number uniquely identifying the project. + returned: success + type: int +lifecycleState: + description: + - The Project lifecycle state. + returned: success + type: str +name: + description: + - 'The user-assigned display name of the Project. It must be 4 to 30 characters. + Allowed characters are: lowercase and uppercase letters, numbers, hyphen, single-quote, + double-quote, space, and exclamation point.' + returned: success + type: str +createTime: + description: + - Time of creation. + returned: success + type: str +labels: + description: + - The labels associated with this Project. + - 'Label keys must be between 1 and 63 characters long and must conform to the following + regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.' 
+ - Label values must be between 0 and 63 characters long and must conform to the + regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. + - No more than 256 labels can be associated with a given resource. + - Clients should store labels in a representation such as JSON that does not depend + on specific characters being disallowed . + returned: success + type: dict +parent: + description: + - A parent organization. + returned: success + type: complex + contains: + type: + description: + - Must be organization. + returned: success + type: str + id: + description: + - Id of the organization. + returned: success + type: str +id: + description: + - The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase letters, + digits, or hyphens. It must start with a letter. + - Trailing hyphens are prohibited. + returned: success + type: str +''' + +ACTIVE = "ACTIVE" + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(type='str'), + labels=dict(type='dict'), + parent=dict(type='dict', options=dict(type=dict(type='str'), id=dict(type='str'))), + id=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + + fetch = fetch_resource(module, 
self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module)) + fetch = fetch_resource(module, self_link(module)) + changed = True + elif fetch.get("lifecycleState") == ACTIVE: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'resourcemanager') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link): + auth = GcpSession(module, 'resourcemanager') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link): + auth = GcpSession(module, 'resourcemanager') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'projectId': module.params.get('id'), + u'name': module.params.get('name'), + u'labels': module.params.get('labels'), + u'parent': ProjectParent(module.params.get('parent', {}), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'resourcemanager') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://cloudresourcemanager.googleapis.com/v1/projects/{id}".format(**module.params) + + +def collection(module): + return "https://cloudresourcemanager.googleapis.com/v1/projects".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + # SQL only: return on 403 if not exist + if allow_not_found and response.status_code == 403: + return None + + try: + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'message']): + module.fail_json(msg=navigate_hash(result, ['error', 'message'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'projectNumber': response.get(u'projectNumber'), + u'lifecycleState': response.get(u'lifecycleState'), + u'name': response.get(u'name'), + u'createTime': response.get(u'createTime'), + u'labels': response.get(u'labels'), + u'parent': ProjectParent(response.get(u'parent', {}), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://cloudresourcemanager.googleapis.com/v1/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response) + if not op_result: + return {} + status = navigate_hash(op_result, ['done']) + wait_done = wait_for_completion(status, op_result, module) + raise_if_errors(wait_done, ['error'], module) + return navigate_hash(wait_done, ['response']) + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while not status: + raise_if_errors(op_result, ['error'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, False) + status = navigate_hash(op_result, ['done']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class ProjectParent(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'type': self.request.get('type'), u'id': self.request.get('id')}) + + def from_response(self): + return remove_nones_from_dict({u'type': self.request.get(u'type'), u'id': self.request.get(u'id')}) + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/google/cloud/plugins/modules/gcp_resourcemanager_project_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_resourcemanager_project_info.py new file mode 100644 index 000000000..1df386436 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_resourcemanager_project_info.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_resourcemanager_project_info +description: +- Gather info for GCP Project +short_description: Gather info for GCP Project +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used
+    type: list
+    elements: str
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+  page_size:
+    description:
+    - Indicates the number of projects that should be returned by the API
+      request
+    type: int
+notes:
+- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variables values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+''' + +EXAMPLES = ''' +- name: get info on a project + gcp_resourcemanager_project_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + page_size: 100 +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + number: + description: + - Number uniquely identifying the project. + returned: success + type: int + lifecycleState: + description: + - The Project lifecycle state. + returned: success + type: str + name: + description: + - 'The user-assigned display name of the Project. It must be 4 to 30 characters. + Allowed characters are: lowercase and uppercase letters, numbers, hyphen, + single-quote, double-quote, space, and exclamation point.' + returned: success + type: str + createTime: + description: + - Time of creation. + returned: success + type: str + labels: + description: + - The labels associated with this Project. + - 'Label keys must be between 1 and 63 characters long and must conform to the + following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.' + - Label values must be between 0 and 63 characters long and must conform to + the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. + - No more than 256 labels can be associated with a given resource. + - Clients should store labels in a representation such as JSON that does not + depend on specific characters being disallowed . + returned: success + type: dict + parent: + description: + - A parent organization. + returned: success + type: complex + contains: + type: + description: + - Must be organization. + returned: success + type: str + id: + description: + - Id of the organization. + returned: success + type: str + id: + description: + - The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase + letters, digits, or hyphens. It must start with a letter. + - Trailing hyphens are prohibited. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict( + page_size=dict(type='int') + )) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://cloudresourcemanager.googleapis.com/v1/projects".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'resourcemanager') + params = {} + if "page_size" in module.params: + params["pageSize"] = module.params.get("page_size") + return auth.list(link, return_if_object, array_name='projects', params=params) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_runtimeconfig_config.py b/ansible_collections/google/cloud/plugins/modules/gcp_runtimeconfig_config.py new file mode 100644 index 000000000..cad3c57e7 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_runtimeconfig_config.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_runtimeconfig_config +description: +- A RuntimeConfig resource is the primary resource in the Cloud RuntimeConfig service. +- A RuntimeConfig resource consists of metadata and a hierarchy of variables. +short_description: Creates a GCP Config +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + description: + description: + - The description to associate with the runtime config. + required: false + type: str + name: + description: + - The name of the runtime config. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
def main():
    """Reconcile a RuntimeConfig config resource with the requested state."""
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            description=dict(type='str'),
            name=dict(required=True, type='str'),
        )
    )

    # Default to the narrow runtimeconfig scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloudruntimeconfig']

    desired = module.params['state']
    remote = fetch_resource(module, self_link(module))
    changed = False

    if remote:
        if desired == 'present':
            # Resource exists: touch it only when the remote copy differs.
            if is_different(module, remote):
                update(module, self_link(module))
                remote = fetch_resource(module, self_link(module))
                changed = True
        else:
            delete(module, self_link(module))
            remote = {}
            changed = True
    elif desired == 'present':
        remote = create(module, collection(module))
        changed = True
    else:
        remote = {}

    remote.update({'changed': changed})
    module.exit_json(**remote)


def create(module, link):
    """POST the desired config to the collection URL."""
    session = GcpSession(module, 'runtimeconfig')
    return return_if_object(module, session.post(link, resource_to_request(module)))


def update(module, link):
    """PUT the desired config to its self link."""
    session = GcpSession(module, 'runtimeconfig')
    return return_if_object(module, session.put(link, resource_to_request(module)))


def delete(module, link):
    """DELETE the config at its self link."""
    session = GcpSession(module, 'runtimeconfig')
    return return_if_object(module, session.delete(link))


def resource_to_request(module):
    """Build the API request body, dropping unset (falsy, non-False) fields."""
    candidate = {u'name': name_pattern(module.params.get('name'), module), u'description': module.params.get('description')}
    return {k: v for k, v in candidate.items() if v or v is False}


def fetch_resource(module, link, allow_not_found=True):
    """GET the config; a 404 yields None when allow_not_found is set."""
    session = GcpSession(module, 'runtimeconfig')
    return return_if_object(module, session.get(link), allow_not_found)


def self_link(module):
    """URL of this config resource."""
    return "https://runtimeconfig.googleapis.com/v1beta1/projects/{project}/configs/{name}".format(**module.params)


def collection(module):
    """URL of the configs collection for the project."""
    return "https://runtimeconfig.googleapis.com/v1beta1/projects/{project}/configs".format(**module.params)


def return_if_object(module, response, allow_not_found=False):
    """Decode a JSON response; None on 204 (or tolerated 404); fail on API errors."""
    if (allow_not_found and response.status_code == 404) or response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """True when the user's request differs from the remote resource (shared keys only)."""
    request = resource_to_request(module)
    remote = response_to_hash(module, response)

    # Compare only keys present on both sides; everything else is output-only.
    remote_subset = {k: v for k, v in remote.items() if k in request}
    request_subset = {k: v for k, v in request.items() if k in remote}

    return GcpRequest(request_subset) != GcpRequest(remote_subset)


def response_to_hash(module, response):
    """Strip output-only properties so the response matches Ansible's parameters."""
    return {u'description': response.get(u'description')}


def name_pattern(name, module):
    """Expand a short config name to its full resource path when necessary."""
    if name is None:
        return

    if not re.match(r"projects/.*/configs/.*", name):
        name = "projects/{project}/configs/{name}".format(**module.params)

    return name


if __name__ == '__main__':
    main()
---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_runtimeconfig_config_info +description: +- Gather info for GCP Config +short_description: Gather info for GCP Config +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. 
+ - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a config + gcp_runtimeconfig_config_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + description: + description: + - The description to associate with the runtime config. + returned: success + type: str + name: + description: + - The name of the runtime config. 
def main():
    """List every RuntimeConfig config in the project and return them as ``resources``."""
    module = GcpModule(argument_spec=dict())

    # Default to the narrow runtimeconfig scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloudruntimeconfig']

    module.exit_json(resources=fetch_list(module, collection(module)))


def collection(module):
    """URL of the configs collection for the project."""
    return "https://runtimeconfig.googleapis.com/v1beta1/projects/{project}/configs".format(**module.params)


def fetch_list(module, link):
    """Page through the configs collection, aggregating the ``configs`` arrays."""
    session = GcpSession(module, 'runtimeconfig')
    return session.list(link, return_if_object, array_name='configs')


def return_if_object(module, response):
    """Decode a successful JSON response; return None on 404/204; fail on API errors."""
    # Not-found and no-content responses carry no object.
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


if __name__ == "__main__":
    main()
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_runtimeconfig_variable +description: +- Describes a single variable within a runtime config resource. +short_description: Creates a GCP Variable +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + value: + description: + - The binary value of the variable. Either this or `text` can be set. + required: false + type: str + text: + description: + - The string value of the variable. Either this or `value` can be set. + required: false + type: str + name: + description: + - The name of the variable resource. + required: true + type: str + config: + description: + - The name of the runtime config that this variable belongs to. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a config + google.cloud.gcp_runtimeconfig_config: + name: my-config + description: My config + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: config + +- name: create a variable + google.cloud.gcp_runtimeconfig_variable: + name: prod-variables/hostname + config: my-config + text: example.com + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +value: + description: + - The binary value of the variable. Either this or `text` can be set. + returned: success + type: str +text: + description: + - The string value of the variable. Either this or `value` can be set. + returned: success + type: str +name: + description: + - The name of the variable resource. + returned: success + type: str +config: + description: + - The name of the runtime config that this variable belongs to. 
def main():
    """Reconcile a RuntimeConfig variable resource with the requested state."""
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            value=dict(type='str'),
            text=dict(type='str'),
            name=dict(required=True, type='str'),
            config=dict(required=True, type='str'),
        )
    )

    # Default to the narrow runtimeconfig scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloudruntimeconfig']

    desired = module.params['state']
    remote = fetch_resource(module, self_link(module))
    changed = False

    if remote:
        if desired == 'present':
            # Resource exists: touch it only when the remote copy differs.
            if is_different(module, remote):
                update(module, self_link(module))
                remote = fetch_resource(module, self_link(module))
                changed = True
        else:
            delete(module, self_link(module))
            remote = {}
            changed = True
    elif desired == 'present':
        remote = create(module, collection(module))
        changed = True
    else:
        remote = {}

    remote.update({'changed': changed})
    module.exit_json(**remote)


def create(module, link):
    """POST the desired variable to the collection URL."""
    session = GcpSession(module, 'runtimeconfig')
    return return_if_object(module, session.post(link, resource_to_request(module)))


def update(module, link):
    """PUT the desired variable to its self link."""
    session = GcpSession(module, 'runtimeconfig')
    return return_if_object(module, session.put(link, resource_to_request(module)))


def delete(module, link):
    """DELETE the variable at its self link."""
    session = GcpSession(module, 'runtimeconfig')
    return return_if_object(module, session.delete(link))


def resource_to_request(module):
    """Build the API request body, dropping unset (falsy, non-False) fields."""
    candidate = {
        u'name': name_pattern(module.params.get('name'), module),
        u'config': module.params.get('config'),
        u'value': module.params.get('value'),
        u'text': module.params.get('text'),
    }
    candidate = encode_request(candidate, module)
    return {k: v for k, v in candidate.items() if v or v is False}


def fetch_resource(module, link, allow_not_found=True):
    """GET the variable; a 404 yields None when allow_not_found is set."""
    session = GcpSession(module, 'runtimeconfig')
    return return_if_object(module, session.get(link), allow_not_found)


def self_link(module):
    """URL of this variable resource."""
    return "https://runtimeconfig.googleapis.com/v1beta1/projects/{project}/configs/{config}/variables/{name}".format(**module.params)


def collection(module):
    """URL of the variables collection for the parent config."""
    return "https://runtimeconfig.googleapis.com/v1beta1/projects/{project}/configs/{config}/variables".format(**module.params)


def return_if_object(module, response, allow_not_found=False):
    """Decode a JSON response; None on 204 (or tolerated 404); fail on API errors."""
    if (allow_not_found and response.status_code == 404) or response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """True when the user's request differs from the remote resource (shared keys only)."""
    request = resource_to_request(module)
    remote = response_to_hash(module, response)

    # Compare only keys present on both sides; everything else is output-only.
    remote_subset = {k: v for k, v in remote.items() if k in request}
    request_subset = {k: v for k, v in request.items() if k in remote}

    return GcpRequest(request_subset) != GcpRequest(remote_subset)


def response_to_hash(module, response):
    """Strip output-only properties so the response matches Ansible's parameters."""
    return {u'value': response.get(u'value'), u'text': response.get(u'text')}


def name_pattern(name, module):
    """Expand a short variable name to its full resource path when necessary."""
    if name is None:
        return

    if not re.match(r"projects/.*/configs/.*/variables/.*", name):
        name = "projects/{project}/configs/{config}/variables/{name}".format(**module.params)

    return name


def encode_request(request, module):
    # `config` routes the request to the right parent but is not part of the
    # GCP API payload, so it is removed (in place) before sending.
    request.pop('config', None)
    return request


if __name__ == '__main__':
    main()
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_runtimeconfig_variable_info +description: +- Gather info for GCP Variable +short_description: Gather info for GCP Variable +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + config: + description: + - The name of the runtime config that this variable belongs to. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. 
+ - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a variable + gcp_runtimeconfig_variable_info: + config: my-config + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + value: + description: + - The binary value of the variable. Either this or `text` can be set. + returned: success + type: str + text: + description: + - The string value of the variable. Either this or `value` can be set. + returned: success + type: str + name: + description: + - The name of the variable resource. + returned: success + type: str + config: + description: + - The name of the runtime config that this variable belongs to. 
def main():
    """List every variable under one runtime config and return them as ``resources``."""
    module = GcpModule(argument_spec=dict(config=dict(required=True, type='str')))

    # Default to the narrow runtimeconfig scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloudruntimeconfig']

    module.exit_json(resources=fetch_list(module, collection(module)))


def collection(module):
    """URL of the variables collection for the parent config."""
    return "https://runtimeconfig.googleapis.com/v1beta1/projects/{project}/configs/{config}/variables".format(**module.params)


def fetch_list(module, link):
    """Page through the variables collection, aggregating the ``variables`` arrays."""
    session = GcpSession(module, 'runtimeconfig')
    return session.list(link, return_if_object, array_name='variables')


def return_if_object(module, response):
    """Decode a successful JSON response; return None on 404/204; fail on API errors."""
    # Not-found and no-content responses carry no object.
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


if __name__ == "__main__":
    main()
+short_description: Creates a GCP Service +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The resource name of the service . + required: true + type: str + disable_dependent_services: + description: + - Indicates if dependent services should also be disabled. Can only be turned + on if service is disabled. + required: false + type: bool + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'Getting Started: U(https://cloud.google.com/service-usage/docs/getting-started)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. 
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a service + google.cloud.gcp_serviceusage_service: + name: spanner.googleapis.com + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The resource name of the service . + returned: success + type: str +parent: + description: + - The name of the parent of this service. For example 'projects/123' . + returned: success + type: str +state: + description: + - Whether or not the service has been enabled for use by the consumer. + returned: success + type: str +disableDependentServices: + description: + - Indicates if dependent services should also be disabled. Can only be turned on + if service is disabled. + returned: success + type: bool +config: + description: + - The service configuration of the available service. + returned: success + type: complex + contains: + name: + description: + - The DNS address at which this service is available. + returned: success + type: str + title: + description: + - The product title for this service. + returned: success + type: str + apis: + description: + - The list of API interfaces exported by this service. + returned: success + type: complex + contains: + name: + description: + - Name of the API. + returned: success + type: str + version: + description: + - The version of the API. 
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
    navigate_hash,
    GcpSession,
    GcpModule,
    GcpRequest,
    remove_nones_from_dict,
    replace_resource_dict,
)
import json
import re
import time

################################################################################
# Main
################################################################################


def main():
    """Main function.

    Ensures the named Google Cloud service is enabled (state=present) or
    disabled (state=absent) for the project, and reports 'changed' accordingly.
    """

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            disable_dependent_services=dict(type='bool'),
        )
    )

    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    state = module.params['state']

    fetch = fetch_resource(module, self_link(module))
    changed = False

    # disable_dependent_services only applies when disabling a service, so it
    # is incompatible with state=present.
    # Fix: the message previously referred to a non-existent
    # "disable_dependent_service" option (missing trailing "s").
    if module.params['state'] == 'present' and module.params['disable_dependent_services']:
        module.fail_json(msg="You cannot enable a service and use the disable_dependent_services option")

    # The API reports disabled services rather than deleting them; treat a
    # DISABLED service as absent so enable/disable stays idempotent.
    if fetch and fetch.get('state') == 'DISABLED':
        fetch = {}

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module))
                fetch = fetch_resource(module, self_link(module))
                changed = True
        else:
            delete(module, delete_link(module))
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, create_link(module))
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link):
    """Enable the service by POSTing to its :enable URL and wait for the operation."""
    auth = GcpSession(module, 'serviceusage')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


def update(module, link):
    """PUT the request body to the service URL and wait for the operation."""
    auth = GcpSession(module, 'serviceusage')
    return wait_for_operation(module, auth.put(link, resource_to_request(module)))


def delete(module, link):
    """Disable the service by POSTing to its :disable URL and wait for the operation."""
    auth = GcpSession(module, 'serviceusage')
    return wait_for_operation(module, auth.post(link))


def resource_to_request(module):
    """Build the API request body from module params, dropping unset values."""
    request = {u'disableDependentServices': module.params.get('disable_dependent_services')}
    return_vals = {}
    for k, v in request.items():
        # Keep explicit False values but drop None/empty ones.
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, allow_not_found=True):
    """GET the service resource; returns None on 404 when allow_not_found is set."""
    auth = GcpSession(module, 'serviceusage')
    return return_if_object(module, auth.get(link), allow_not_found)


def self_link(module):
    """Canonical URL of this service resource."""
    return "https://serviceusage.googleapis.com/v1/projects/{project}/services/{name}".format(**module.params)


def collection(module):
    """URL of the project's services collection."""
    return "https://serviceusage.googleapis.com/v1/projects/{project}/services".format(**module.params)


def create_link(module):
    """URL used to enable the service."""
    return "https://serviceusage.googleapis.com/v1/projects/{project}/services/{name}:enable".format(**module.params)


def delete_link(module):
    """URL used to disable the service."""
    return "https://serviceusage.googleapis.com/v1/projects/{project}/services/{name}:disable".format(**module.params)


def return_if_object(module, response, allow_not_found=False):
    """Decode an HTTP response into a dict, failing the module on API errors.

    Returns None for 404 (when allowed) and for 204 (no content) responses.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True if the requested state differs from the API's current state."""
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    return {
        u'name': response.get(u'name'),
        u'parent': response.get(u'parent'),
        u'state': response.get(u'state'),
        u'disableDependentServices': response.get(u'disableDependentServices'),
        u'config': ServiceConfig(response.get(u'config', {}), module).from_response(),
    }


def name_pattern(name, module):
    """Expand a bare name into the full projects/*/services/* form.

    NOTE(review): not referenced anywhere in this module; kept for parity with
    the other generated modules.
    """
    if name is None:
        return

    regex = r"projects/.*/services/.*"

    if not re.match(regex, name):
        name = "projects/{project}/services/{name}".format(**module.params)

    return name


def async_op_url(module, extra_data=None):
    """URL used to poll a long-running operation identified by op_id."""
    if extra_data is None:
        extra_data = {}
    url = "https://serviceusage.googleapis.com/v1/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    """Block until the operation in `response` completes; return its 'response' field."""
    op_result = return_if_object(module, response)
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['done'])
    wait_done = wait_for_completion(status, op_result, module)
    raise_if_errors(wait_done, ['error'], module)
    # NOTE(review): may return None when the finished operation carries no
    # 'response' field; callers then receive None instead of a dict.
    return navigate_hash(wait_done, ['response'])


def wait_for_completion(status, op_result, module):
    """Poll the operation URL once per second until 'done' is truthy."""
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while not status:
        raise_if_errors(op_result, ['error'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, False)
        status = navigate_hash(op_result, ['done'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module if an error object exists at err_path in the response."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


class ServiceConfig(object):
    """Maps the nested 'config' object between Ansible params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'name': self.request.get('name'),
                u'title': self.request.get('title'),
                u'apis': ServiceApisArray(self.request.get('apis', []), self.module).to_request(),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'name': self.request.get(u'name'),
                u'title': self.request.get(u'title'),
                u'apis': ServiceApisArray(self.request.get(u'apis', []), self.module).from_response(),
            }
        )


class ServiceApisArray(object):
    """Maps the 'apis' array elements between Ansible params and API JSON."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'name': item.get('name'), u'version': item.get('version')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'name': item.get(u'name'), u'version': item.get(u'version')})


if __name__ == '__main__':
    main()
+# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_serviceusage_service_info +description: +- Gather info for GCP Service +short_description: Gather info for GCP Service +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a service + gcp_serviceusage_service_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The resource name of the service . + returned: success + type: str + parent: + description: + - The name of the parent of this service. For example 'projects/123' . + returned: success + type: str + state: + description: + - Whether or not the service has been enabled for use by the consumer. + returned: success + type: str + disableDependentServices: + description: + - Indicates if dependent services should also be disabled. Can only be turned + on if service is disabled. + returned: success + type: bool + config: + description: + - The service configuration of the available service. 
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json

################################################################################
# Main
################################################################################


def main():
    """List every service of the project and return them under 'resources'."""
    module = GcpModule(argument_spec=dict())

    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    module.exit_json(resources=fetch_list(module, collection(module)))


def collection(module):
    """Build the URL of the project's services collection."""
    return "https://serviceusage.googleapis.com/v1/projects/{project}/services".format(**module.params)


def fetch_list(module, link):
    """Page through the collection, decoding each page with return_if_object."""
    session = GcpSession(module, 'serviceusage')
    return session.list(link, return_if_object, array_name='services')


def return_if_object(module, response):
    """Decode one HTTP response, failing the module on API or JSON errors."""
    # 404 (not found) and 204 (no content) both mean "nothing to report".
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        decoded = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as err:
        module.fail_json(msg="Invalid JSON response with error: %s" % err)

    errors = navigate_hash(decoded, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return decoded


if __name__ == "__main__":
    main()
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sourcerepo_repository +description: +- A repository (or repo) is a Git repository storing versioned source content. +short_description: Creates a GCP Repository +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - Resource name of the repository, of the form projects/{{project}}/repos/{{repo}}. + - The repo name may contain slashes. eg, projects/myproject/repos/name/with/slash + . + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/source-repositories/docs/reference/rest/v1/projects.repos)' +- 'Official Documentation: U(https://cloud.google.com/source-repositories/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a repository + google.cloud.gcp_sourcerepo_repository: + name: test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - Resource name of the repository, of the form projects/{{project}}/repos/{{repo}}. + - The repo name may contain slashes. eg, projects/myproject/repos/name/with/slash + . + returned: success + type: str +url: + description: + - URL to clone the repository from Google Cloud Source Repositories. + returned: success + type: str +size: + description: + - The disk usage of the repo, in bytes. 
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import re

################################################################################
# Main
################################################################################


def main():
    """Ensure the Cloud Source Repositories repo matches the requested state."""

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
        )
    )

    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    desired_state = module.params['state']
    link = self_link(module)
    fetch = fetch_resource(module, link)
    changed = False

    if fetch and desired_state == 'present':
        # Resource exists; reconcile it with the requested configuration.
        if is_different(module, fetch):
            update(module, link, fetch)
            fetch = fetch_resource(module, link)
            changed = True
    elif fetch:
        # Resource exists but state is 'absent': remove it.
        delete(module, link)
        fetch = {}
        changed = True
    elif desired_state == 'present':
        # Resource is missing and wanted: create it.
        fetch = create(module, collection(module))
        changed = True
    else:
        fetch = {}

    fetch.update({'changed': changed})
    module.exit_json(**fetch)


def create(module, link):
    """POST a new repository to the collection URL."""
    session = GcpSession(module, 'sourcerepo')
    return return_if_object(module, session.post(link, resource_to_request(module)))


def update(module, link, fetch):
    """PATCH the repository, sending an updateMask of changed fields."""
    session = GcpSession(module, 'sourcerepo')
    body = resource_to_request(module)
    mask = updateMask(body, response_to_hash(module, fetch))
    # 'name' identifies the resource via the URL and must not appear in the body.
    del body['name']
    return return_if_object(module, session.patch(link, body, params={'updateMask': mask}))


def updateMask(request, response):
    """Join the changed top-level field names into a comma-separated mask.

    The field list is empty here, so the mask is always the empty string.
    """
    update_mask = []
    return ','.join(update_mask)


def delete(module, link):
    """DELETE the repository resource."""
    session = GcpSession(module, 'sourcerepo')
    return return_if_object(module, session.delete(link))


def resource_to_request(module):
    """Build the API request body, dropping unset values (explicit False kept)."""
    candidate = {u'name': name_pattern(module.params.get('name'), module)}
    return {k: v for k, v in candidate.items() if v or v is False}


def fetch_resource(module, link, allow_not_found=True):
    """GET the repository; returns None on 404 when allow_not_found is set."""
    session = GcpSession(module, 'sourcerepo')
    return return_if_object(module, session.get(link), allow_not_found)


def self_link(module):
    """Canonical URL of this repository resource."""
    return "https://sourcerepo.googleapis.com/v1/projects/{project}/repos/{name}".format(**module.params)


def collection(module):
    """URL of the project's repos collection."""
    return "https://sourcerepo.googleapis.com/v1/projects/{project}/repos".format(**module.params)


def return_if_object(module, response, allow_not_found=False):
    """Decode an HTTP response, failing the module on API or JSON errors.

    Returns None for 404 (when allowed) and for 204 (no content) responses.
    """
    if allow_not_found and response.status_code == 404:
        return None

    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        decoded = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    errors = navigate_hash(decoded, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return decoded


def is_different(module, response):
    """Return True when the requested state differs from the API's reported state."""
    request = resource_to_request(module)
    current = response_to_hash(module, response)

    # Compare only keys present on both sides; everything else is output-only.
    shared_response = {k: v for k, v in current.items() if k in request}
    shared_request = {k: v for k, v in request.items() if k in current}

    return GcpRequest(shared_request) != GcpRequest(shared_response)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Reduce the API response to the fields Ansible compares against."""
    return {
        u'name': name_pattern(module.params.get('name'), module),
        u'url': response.get(u'url'),
        u'size': response.get(u'size'),
    }


def name_pattern(name, module):
    """Expand a bare repo name into the projects/{project}/repos/{name} form."""
    if name is None:
        return

    if re.match(r"projects/.*/repos/.*", name):
        return name
    return "projects/{project}/repos/{name}".format(**module.params)


if __name__ == '__main__':
    main()
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sourcerepo_repository_info +description: +- Gather info for GCP Repository +short_description: Gather info for GCP Repository +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a repository + gcp_sourcerepo_repository_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - Resource name of the repository, of the form projects/{{project}}/repos/{{repo}}. + - The repo name may contain slashes. eg, projects/myproject/repos/name/with/slash + . + returned: success + type: str + url: + description: + - URL to clone the repository from Google Cloud Source Repositories. + returned: success + type: str + size: + description: + - The disk usage of the repo, in bytes. 
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json

################################################################################
# Main
################################################################################


def main():
    """List every repository of the project and return them under 'resources'."""
    module = GcpModule(argument_spec=dict())

    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    module.exit_json(resources=fetch_list(module, collection(module)))


def collection(module):
    """Build the URL of the project's repos collection."""
    return "https://sourcerepo.googleapis.com/v1/projects/{project}/repos".format(**module.params)


def fetch_list(module, link):
    """Page through the collection, decoding each page with return_if_object."""
    session = GcpSession(module, 'sourcerepo')
    return session.list(link, return_if_object, array_name='repos')


def return_if_object(module, response):
    """Decode one HTTP response, failing the module on API or JSON errors."""
    # 404 (not found) and 204 (no content) both mean "nothing to report".
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        decoded = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as err:
        module.fail_json(msg="Invalid JSON response with error: %s" % err)

    errors = navigate_hash(decoded, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return decoded


if __name__ == "__main__":
    main()
+short_description: Creates a GCP Database +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - A unique identifier for the database, which cannot be changed after the instance + is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9]. + required: true + type: str + extra_statements: + description: + - 'An optional list of DDL statements to run inside the newly created database. + Statements can create tables, indexes, etc. These statements execute atomically + with the creation of the database: if there is an error in any statement, the + database is not created.' + elements: str + required: false + type: list + encryption_config: + description: + - Encryption configuration for the database . + required: false + type: dict + suboptions: + kms_key_name: + description: + - Fully qualified name of the KMS key to use to encrypt this database. This + key must exist in the same location as the Spanner Database. + required: true + type: str + instance: + description: + - The instance to create the database on. + - 'This field represents a link to a Instance resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_spanner_instance task and then set this instance field to "{{ name-of-resource + }}"' + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instances.databases)' +- 'Official Documentation: U(https://cloud.google.com/spanner/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a instance + google.cloud.gcp_spanner_instance: + name: instance-database + display_name: My Spanner Instance + node_count: 2 + labels: + cost_center: ti-1700004 + config: regional-us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instance + +- name: create a database + google.cloud.gcp_spanner_database: + name: webstore + instance: "{{ instance }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - A unique identifier for the database, which cannot be changed after the instance + is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9]. + returned: success + type: str +extraStatements: + description: + - 'An optional list of DDL statements to run inside the newly created database. + Statements can create tables, indexes, etc. These statements execute atomically + with the creation of the database: if there is an error in any statement, the + database is not created.' + returned: success + type: list +encryptionConfig: + description: + - Encryption configuration for the database . + returned: success + type: complex + contains: + kmsKeyName: + description: + - Fully qualified name of the KMS key to use to encrypt this database. This + key must exist in the same location as the Spanner Database. + returned: success + type: str +instance: + description: + - The instance to create the database on. 
+ returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + extra_statements=dict(type='list', elements='str'), + encryption_config=dict(type='dict', options=dict(kms_key_name=dict(required=True, type='str'))), + instance=dict(required=True, type='dict'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if 'instance' in module.params and 'name' in module.params['instance']: + module.params['instance']['name'] = module.params['instance']['name'].split('/')[-1] + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'spanner') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + 
def update(module, link, fetch):
    """Refuse in-place updates.

    Spanner databases are deliberately never mutated by this module: an
    accidental re-create could destroy data, so any detected drift is
    reported as a hard failure instead of being reconciled.
    """
    module.fail_json(msg="Spanner objects can't be updated to ensure data safety")


def update_fields(module, request, response):
    """Dispatch per-field updaters for fields that differ between the desired
    request and the live response.

    NOTE(review): currently unreachable — update() above always fails before
    main() could ever route here. Kept because the generator emits it.
    """
    if response.get('extraStatements') != request.get('extraStatements'):
        extra_statements_update(module, request, response)


def extra_statements_update(module, request, response):
    """PATCH the database's DDL endpoint with the desired extra statements.

    Bug fix: the URL was previously built with ``.format(**module.params)``,
    but ``module.params['instance']`` is a dict (see the module argspec), so
    the dict's repr was embedded in the URL. Resolve the instance name the
    same way self_link()/collection() do, via replace_resource_dict().
    The request body key is ``statements`` per the Spanner
    projects.instances.databases.updateDdl API (was ``extraStatements``,
    which the API does not accept).
    """
    auth = GcpSession(module, 'spanner')
    res = {
        'project': module.params['project'],
        'instance': replace_resource_dict(module.params['instance'], 'name'),
        'name': module.params['name'],
    }
    auth.patch(
        "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases/{name}/ddl".format(**res),
        {u'statements': module.params.get('extra_statements')},
    )


def delete(module, link):
    """Delete the database at ``link`` and return the decoded API response."""
    auth = GcpSession(module, 'spanner')
    return return_if_object(module, auth.delete(link))


def resource_to_request(module):
    """Build the API request body from module params.

    encode_request() rewrites the body into Spanner's create form (drops
    'name', adds 'create_statement'); falsy values other than False are then
    stripped so we never send empty fields.
    """
    request = {
        u'name': module.params.get('name'),
        u'extraStatements': module.params.get('extra_statements'),
        u'encryptionConfig': DatabaseEncryptionconfig(module.params.get('encryption_config', {}), module).to_request(),
    }
    request = encode_request(request, module)
    return_vals = {}
    for k, v in request.items():
        # Keep explicit False but drop None/''/[]/{} — the API rejects or
        # misinterprets empty fields.
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, allow_not_found=True):
    """GET ``link`` and return the decoded body, or None on 404 when allowed."""
    auth = GcpSession(module, 'spanner')
    return return_if_object(module, auth.get(link), allow_not_found)


def self_link(module):
    """Return the canonical URL of this specific database."""
    res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name'), 'name': module.params['name']}
    return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases/{name}".format(**res)


def collection(module):
    """Return the URL of the databases collection (used for create/list)."""
    res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
    return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res)


def return_if_object(module, response, allow_not_found=False):
    """Decode an HTTP response into a dict, or fail the module on errors.

    Returns None for 404 (when ``allow_not_found``) and for 204 (no content).
    Raises via module.fail_json on non-2xx status, invalid JSON, or an
    API-level ``error.errors`` payload.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    # json.decoder.JSONDecodeError does not exist on Python 2; fall back to
    # ValueError (its base class) there.
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    result = decode_response(result, module)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    """Return True when the live resource differs from the desired params.

    Both sides are reduced to the keys they have in common, so output-only
    response fields and request-only create fields never trigger drift.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    # Normalize the request's resource names the same way API responses are
    # normalized, so the comparison is apples-to-apples.
    request = decode_response(request, module)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response): + return { + u'name': module.params.get('name'), + u'extraStatements': response.get(u'extraStatements'), + u'encryptionConfig': DatabaseEncryptionconfig(response.get(u'encryptionConfig', {}), module).from_response(), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://spanner.googleapis.com/v1/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response) + if op_result is None: + return {} + status = navigate_hash(op_result, ['done']) + wait_done = wait_for_completion(status, op_result, module) + raise_if_errors(wait_done, ['error'], module) + return navigate_hash(wait_done, ['response']) + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while not status: + raise_if_errors(op_result, ['error'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, False) + status = navigate_hash(op_result, ['done']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +def decode_response(response, module): + if not response: + return response + + if 'name' not in response: + return response + + if '/operations/' in response['name']: + return response + + response['name'] = response['name'].split('/')[-1] + return response + + +def encode_request(request, module): + request['create_statement'] = "CREATE DATABASE `{0}`".format(module.params['name']) + del request['name'] + return request + + +class DatabaseEncryptionconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return 
remove_nones_from_dict({u'kmsKeyName': self.request.get('kms_key_name')}) + + def from_response(self): + return remove_nones_from_dict({u'kmsKeyName': self.request.get(u'kmsKeyName')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_spanner_database_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_spanner_database_info.py new file mode 100644 index 000000000..2f11f1ce4 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_spanner_database_info.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_spanner_database_info +description: +- Gather info for GCP Database +short_description: Gather info for GCP Database +author: Google Inc. 
(@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + instance: + description: + - The instance to create the database on. + - 'This field represents a link to a Instance resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_spanner_instance task and then set this instance field to "{{ name-of-resource + }}"' + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. 
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a database + gcp_spanner_database_info: + instance: "{{ instance }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - A unique identifier for the database, which cannot be changed after the instance + is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9]. + returned: success + type: str + extraStatements: + description: + - 'An optional list of DDL statements to run inside the newly created database. + Statements can create tables, indexes, etc. These statements execute atomically + with the creation of the database: if there is an error in any statement, + the database is not created.' + returned: success + type: list + encryptionConfig: + description: + - Encryption configuration for the database . + returned: success + type: complex + contains: + kmsKeyName: + description: + - Fully qualified name of the KMS key to use to encrypt this database. This + key must exist in the same location as the Spanner Database. + returned: success + type: str + instance: + description: + - The instance to create the database on. 
+ returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(instance=dict(required=True, type='dict'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')} + return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res) + + +def fetch_list(module, link): + auth = GcpSession(module, 'spanner') + return auth.list(link, return_if_object, array_name='databases') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_spanner_instance.py b/ansible_collections/google/cloud/plugins/modules/gcp_spanner_instance.py new file mode 100644 index 000000000..8458042a0 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_spanner_instance.py @@ -0,0 +1,421 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_spanner_instance +description: +- An isolated set of Cloud Spanner resources on which databases can be hosted. 
+short_description: Creates a GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - A unique identifier for the instance, which cannot be changed after the instance + is created. The name must be between 6 and 30 characters in length. + required: true + type: str + config: + description: + - The name of the instance's configuration (similar but not quite the same as + a region) which defines the geographic placement and replication of your databases + in this instance. It determines where your data is stored. Values are typically + of the form `regional-europe-west1` , `us-central` etc. + - In order to obtain a valid list please consult the [Configuration section of + the docs](U(https://cloud.google.com/spanner/docs/instances)). + required: true + type: str + display_name: + description: + - The descriptive name for this instance as it appears in UIs. Must be unique + per project and between 4 and 30 characters in length. + required: true + type: str + node_count: + description: + - The number of nodes allocated to this instance. At most one of either node_count + or processing_units can be present in terraform. . + required: false + type: int + processing_units: + description: + - The number of processing units allocated to this instance. At most one of processing_units + or node_count can be present in terraform. . + required: false + type: int + labels: + description: + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + required: false + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instances)' +- 'Official Documentation: U(https://cloud.google.com/spanner/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. 
+''' + +EXAMPLES = ''' +- name: create a instance + google.cloud.gcp_spanner_instance: + name: testinstance + display_name: My Spanner Instance + node_count: 2 + labels: + cost_center: ti-1700004 + config: regional-us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - A unique identifier for the instance, which cannot be changed after the instance + is created. The name must be between 6 and 30 characters in length. + returned: success + type: str +config: + description: + - The name of the instance's configuration (similar but not quite the same as a + region) which defines the geographic placement and replication of your databases + in this instance. It determines where your data is stored. Values are typically + of the form `regional-europe-west1` , `us-central` etc. + - In order to obtain a valid list please consult the [Configuration section of the + docs](U(https://cloud.google.com/spanner/docs/instances)). + returned: success + type: str +displayName: + description: + - The descriptive name for this instance as it appears in UIs. Must be unique per + project and between 4 and 30 characters in length. + returned: success + type: str +nodeCount: + description: + - The number of nodes allocated to this instance. At most one of either node_count + or processing_units can be present in terraform. . + returned: success + type: int +processingUnits: + description: + - The number of processing units allocated to this instance. At most one of processing_units + or node_count can be present in terraform. . + returned: success + type: int +labels: + description: + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' 
+ returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + config=dict(required=True, type='str'), + display_name=dict(required=True, type='str'), + node_count=dict(type='int'), + processing_units=dict(type='int'), + labels=dict(type='dict'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module)) + fetch = fetch_resource(module, self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'spanner') + return wait_for_operation(module, auth.post(link, resource_to_create(module))) + + +def update(module, link): + module.fail_json(msg="Spanner objects can't be updated to ensure data safety") + + +def delete(module, link): + auth = GcpSession(module, 'spanner') + return return_if_object(module, 
auth.delete(link)) + + +def resource_to_request(module): + request = { + u'name': module.params.get('name'), + u'config': module.params.get('config'), + u'displayName': module.params.get('display_name'), + u'nodeCount': module.params.get('node_count'), + u'processingUnits': module.params.get('processing_units'), + u'labels': module.params.get('labels'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, allow_not_found=True): + auth = GcpSession(module, 'spanner') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://spanner.googleapis.com/v1/projects/{project}/instances/{name}".format(**module.params) + + +def collection(module): + return "https://spanner.googleapis.com/v1/projects/{project}/instances".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + result = decode_response(result, module) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + request = decode_response(request, module) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'name': module.params.get('name'), + u'config': module.params.get('config'), + u'displayName': response.get(u'displayName'), + u'nodeCount': response.get(u'nodeCount'), + u'processingUnits': response.get(u'processingUnits'), + u'labels': response.get(u'labels'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://spanner.googleapis.com/v1/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response) + if op_result is None: + return {} + status = navigate_hash(op_result, ['done']) + wait_done = wait_for_completion(status, op_result, module) + raise_if_errors(wait_done, ['error'], module) + return navigate_hash(wait_done, ['response']) + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while not status: + raise_if_errors(op_result, ['error'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, False) + status = navigate_hash(op_result, ['done']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +def resource_to_create(module): + instance = resource_to_request(module) + instance['name'] = "projects/{0}/instances/{1}".format(module.params['project'], module.params['name']) + instance['config'] = 
"projects/{0}/instanceConfigs/{1}".format(module.params['project'], instance['config']) + return {'instanceId': module.params['name'], 'instance': instance} + + +def resource_to_update(module): + instance = resource_to_request(module) + instance['name'] = "projects/{0}/instances/{1}".format(module.params['project'], module.params['name']) + instance['config'] = "projects/{0}/instanceConfigs/{1}".format(module.params['project'], instance['config']) + return {'instance': instance, 'fieldMask': "'name' ,'config' ,'displayName' ,'nodeCount' ,'processingUnits' ,'labels'"} + + +def decode_response(response, module): + if not response: + return response + + if '/operations/' in response['name']: + return response + + response['name'] = response['name'].split('/')[-1] + response['config'] = response['config'].split('/')[-1] + return response + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_spanner_instance_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_spanner_instance_info.py new file mode 100644 index 000000000..1fc5fce4f --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_spanner_instance_info.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_spanner_instance_info +description: +- Gather info for GCP Instance +short_description: Gather info for GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance + gcp_spanner_instance_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - A unique identifier for the instance, which cannot be changed after the instance + is created. The name must be between 6 and 30 characters in length. + returned: success + type: str + config: + description: + - The name of the instance's configuration (similar but not quite the same as + a region) which defines the geographic placement and replication of your databases + in this instance. It determines where your data is stored. Values are typically + of the form `regional-europe-west1` , `us-central` etc. + - In order to obtain a valid list please consult the [Configuration section + of the docs](U(https://cloud.google.com/spanner/docs/instances)). + returned: success + type: str + displayName: + description: + - The descriptive name for this instance as it appears in UIs. Must be unique + per project and between 4 and 30 characters in length. + returned: success + type: str + nodeCount: + description: + - The number of nodes allocated to this instance. 
At most one of either node_count
+        or processing_units can be set for an instance.
+      returned: success
+      type: int
+    processingUnits:
+      description:
+      - The number of processing units allocated to this instance. At most one of
+        processing_units or node_count can be set for an instance.
+      returned: success
+      type: int
+    labels:
+      description:
+      - 'An object containing a list of "key": value pairs.'
+      - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
+      returned: success
+      type: dict
+'''
+
+################################################################################
+# Imports
+################################################################################
+from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
+import json
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    module = GcpModule(argument_spec=dict())
+
+    if not module.params['scopes']:
+        module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
+
+    return_value = {'resources': fetch_list(module, collection(module))}
+    module.exit_json(**return_value)
+
+
+def collection(module):
+    return "https://spanner.googleapis.com/v1/projects/{project}/instances".format(**module.params)
+
+
+def fetch_list(module, link):
+    auth = GcpSession(module, 'spanner')
+    return auth.list(link, return_if_object, array_name='instances')
+
+
+def return_if_object(module, response):
+    # If not found, return nothing.
+    if response.status_code == 404:
+        return None
+
+    # If no content, return nothing.
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_sql_database.py b/ansible_collections/google/cloud/plugins/modules/gcp_sql_database.py new file mode 100644 index 000000000..c43673daf --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_sql_database.py @@ -0,0 +1,359 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_database +description: +- Represents a SQL database inside the Cloud SQL instance, hosted in Google's cloud. 
+short_description: Creates a GCP Database +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + charset: + description: + - The charset value. See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html)) + and Postgres' [Character Set Support](U(https://www.postgresql.org/docs/9.6/static/multibyte.html)) + for more details and supported values. Postgres databases only support a value + of `UTF8` at creation time. + required: false + type: str + collation: + description: + - The collation value. See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html)) + and Postgres' [Collation Support](U(https://www.postgresql.org/docs/9.6/static/collation.html)) + for more details and supported values. Postgres databases only support a value + of `en_US.UTF8` at creation time. + required: false + type: str + name: + description: + - The name of the database in the Cloud SQL instance. + - This does not include the project ID or instance name. + required: true + type: str + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a instance + google.cloud.gcp_sql_instance: + name: "{{resource_name}}-3" + settings: + ip_configuration: + authorized_networks: + - name: google dns server + value: 8.8.8.8/32 + tier: db-n1-standard-1 + region: us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instance + +- name: create a database + google.cloud.gcp_sql_database: + name: test_object + charset: utf8 + instance: "{{ instance.name }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +charset: + description: + - The charset value. See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html)) + and Postgres' [Character Set Support](U(https://www.postgresql.org/docs/9.6/static/multibyte.html)) + for more details and supported values. Postgres databases only support a value + of `UTF8` at creation time. + returned: success + type: str +collation: + description: + - The collation value. See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html)) + and Postgres' [Collation Support](U(https://www.postgresql.org/docs/9.6/static/collation.html)) + for more details and supported values. Postgres databases only support a value + of `en_US.UTF8` at creation time. 
+ returned: success + type: str +name: + description: + - The name of the database in the Cloud SQL instance. + - This does not include the project ID or instance name. + returned: success + type: str +instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + charset=dict(type='str'), + collation=dict(type='str'), + name=dict(required=True, type='str'), + instance=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + state = module.params['state'] + kind = 'sql#database' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'sql') + return 
wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + module.fail_json(msg="SQL objects can't be updated to ensure data safety") + + +def delete(module, link, kind): + auth = GcpSession(module, 'sql') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'sql#database', + u'instance': module.params.get('instance'), + u'charset': module.params.get('charset'), + u'collation': module.params.get('collation'), + u'name': module.params.get('name'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'sql') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/databases/{name}".format(**module.params) + + +def collection(module): + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/databases".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + # SQL only: return on 403 if not exist + if allow_not_found and response.status_code == 403: + return None + + try: + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return {u'charset': response.get(u'charset'), u'collation': response.get(u'collation'), u'name': module.params.get('name')} + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'sql#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'sql#database') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'sql#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_sql_database_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_sql_database_info.py new file mode 100644 index 000000000..4938076fd --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_sql_database_info.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 
*** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_database_info +description: +- Gather info for GCP Database +short_description: Gather info for GCP Database +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a database + gcp_sql_database_info: + instance: "{{ instance.name }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + charset: + description: + - The charset value. See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html)) + and Postgres' [Character Set Support](U(https://www.postgresql.org/docs/9.6/static/multibyte.html)) + for more details and supported values. Postgres databases only support a value + of `UTF8` at creation time. + returned: success + type: str + collation: + description: + - The collation value. 
See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html)) + and Postgres' [Collation Support](U(https://www.postgresql.org/docs/9.6/static/collation.html)) + for more details and supported values. Postgres databases only support a value + of `en_US.UTF8` at creation time. + returned: success + type: str + name: + description: + - The name of the database in the Cloud SQL instance. + - This does not include the project ID or instance name. + returned: success + type: str + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(instance=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/databases".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'sql') + return auth.list(link, return_if_object, array_name='items') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_sql_instance.py b/ansible_collections/google/cloud/plugins/modules/gcp_sql_instance.py new file mode 100644 index 000000000..bb19ecf9e --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_sql_instance.py @@ -0,0 +1,1388 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_instance +description: +- Represents a Cloud SQL instance. 
Cloud SQL instances are SQL databases hosted in + Google's cloud. The Instances resource provides methods for common configuration + and management tasks. +short_description: Creates a GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + backend_type: + description: + - "* FIRST_GEN: First Generation instance. MySQL only." + - "* SECOND_GEN: Second Generation instance or PostgreSQL instance." + - "* EXTERNAL: A database server that is not managed by Google." + - 'Some valid choices include: "FIRST_GEN", "SECOND_GEN", "EXTERNAL"' + required: false + type: str + connection_name: + description: + - Connection name of the Cloud SQL instance used in connection strings. + required: false + type: str + database_version: + description: + - The database engine type and version. For First Generation instances, can be + MYSQL_5_5, or MYSQL_5_6. For Second Generation instances, can be MYSQL_5_6 or + MYSQL_5_7. Defaults to MYSQL_5_6. + - 'PostgreSQL instances: POSTGRES_9_6 The databaseVersion property can not be + changed after instance creation.' + - 'Some valid choices include: "MYSQL_5_5", "MYSQL_5_6", "MYSQL_5_7", "POSTGRES_9_6"' + required: false + type: str + failover_replica: + description: + - The name and status of the failover replica. This property is applicable only + to Second Generation instances. + required: false + type: dict + suboptions: + name: + description: + - The name of the failover replica. If specified at instance creation, a failover + replica is created for the instance. The name doesn't include the project + ID. This property is applicable only to Second Generation instances. + required: false + type: str + instance_type: + description: + - The instance type. This can be one of the following. 
+ - "* CLOUD_SQL_INSTANCE: A Cloud SQL instance that is not replicating from a master." + - "* ON_PREMISES_INSTANCE: An instance running on the customer's premises." + - "* READ_REPLICA_INSTANCE: A Cloud SQL instance configured as a read-replica." + - 'Some valid choices include: "CLOUD_SQL_INSTANCE", "ON_PREMISES_INSTANCE", "READ_REPLICA_INSTANCE"' + required: false + type: str + ipv6_address: + description: + - The IPv6 address assigned to the instance. This property is applicable only + to First Generation instances. + required: false + type: str + master_instance_name: + description: + - The name of the instance which will act as master in the replication setup. + required: false + type: str + max_disk_size: + description: + - The maximum disk size of the instance in bytes. + required: false + type: int + name: + description: + - Name of the Cloud SQL instance. This does not include the project ID. + required: true + type: str + region: + description: + - The geographical region. Defaults to us-central or us-central1 depending on + the instance type (First Generation or Second Generation/PostgreSQL). + required: false + type: str + replica_configuration: + description: + - Configuration specific to failover replicas and read replicas. + required: false + type: dict + suboptions: + failover_target: + description: + - Specifies if the replica is the failover target. If the field is set to + true the replica will be designated as a failover replica. + - In case the master instance fails, the replica instance will be promoted + as the new master instance. + - Only one replica can be specified as failover target, and the replica has + to be in different zone with the master instance. + required: false + type: bool + mysql_replica_configuration: + description: + - MySQL specific configuration when replicating from a MySQL on-premises master. 
+ Replication configuration information such as the username, password, certificates, + and keys are not stored in the instance metadata. The configuration information + is used only to set up the replication connection and is stored by MySQL + in a file named master.info in the data directory. + required: false + type: dict + suboptions: + ca_certificate: + description: + - PEM representation of the trusted CA's x509 certificate. + required: false + type: str + client_certificate: + description: + - PEM representation of the replica's x509 certificate . + required: false + type: str + client_key: + description: + - PEM representation of the replica's private key. The corresponding public + key is encoded in the client's certificate. + required: false + type: str + connect_retry_interval: + description: + - Seconds to wait between connect retries. MySQL's default is 60 seconds. + required: false + type: int + dump_file_path: + description: + - Path to a SQL dump file in Google Cloud Storage from which the replica + instance is to be created. The URI is in the form gs://bucketName/fileName. + Compressed gzip files (.gz) are also supported. Dumps should have the + binlog coordinates from which replication should begin. This can be + accomplished by setting --master-data to 1 when using mysqldump. + required: false + type: str + master_heartbeat_period: + description: + - Interval in milliseconds between replication heartbeats. + required: false + type: int + password: + description: + - The password for the replication connection. + required: false + type: str + ssl_cipher: + description: + - A list of permissible ciphers to use for SSL encryption. + required: false + type: str + username: + description: + - The username for the replication connection. + required: false + type: str + verify_server_certificate: + description: + - Whether or not to check the master's Common Name value in the certificate + that it sends during the SSL handshake. 
+ required: false + type: bool + replica_names: + description: + - The replicas of the instance. + elements: str + required: false + type: list + service_account_email_address: + description: + - The service account email address assigned to the instance. This property + is applicable only to Second Generation instances. + required: false + type: str + settings: + description: + - The user settings. + required: false + type: dict + suboptions: + database_flags: + description: + - The database flags passed to the instance at startup. + elements: dict + required: false + type: list + suboptions: + name: + description: + - The name of the flag. These flags are passed at instance startup, so + include both server options and system variables for MySQL. Flags should + be specified with underscores, not hyphens. + required: false + type: str + value: + description: + - The value of the flag. Booleans should be set to on for true and off + for false. This field must be omitted if the flag doesn't take a value. + required: false + type: str + ip_configuration: + description: + - The settings for IP Management. This allows to enable or disable the instance + IP and manage which external networks can connect to the instance. The IPv4 + address cannot be disabled for Second Generation instances. + required: false + type: dict + suboptions: + ipv4_enabled: + description: + - Whether the instance should be assigned an IP address or not. + required: false + type: bool + private_network: + description: + - The resource link for the VPC network from which the Cloud SQL instance is accessible for private IP + (e.g /projects/myProject/global/networks/default) + required: false + type: str + authorized_networks: + description: + - The list of external networks that are allowed to connect to the instance + using the IP. In CIDR notation, also known as 'slash' notation (e.g. + 192.168.100.0/24). 
+ elements: dict + required: false + type: list + suboptions: + expiration_time: + description: + - The time when this access control entry expires in RFC 3339 format, + for example 2012-11-15T16:19:00.094Z. + required: false + type: str + name: + description: + - An optional label to identify this entry. + required: false + type: str + value: + description: + - The whitelisted value for the access control list. For example, + to grant access to a client from an external IP (IPv4 or IPv6) address + or subnet, use that address or subnet here. + required: false + type: str + require_ssl: + description: + - Whether the mysqld should default to 'REQUIRE X509' for users connecting + over IP. + required: false + type: bool + tier: + description: + - The tier or machine type for this instance, for example db-n1-standard-1. + For MySQL instances, this field determines whether the instance is Second + Generation (recommended) or First Generation. + required: false + type: str + availability_type: + description: + - The availabilityType define if your postgres instance is run zonal or regional. + - 'Some valid choices include: "ZONAL", "REGIONAL"' + required: false + type: str + backup_configuration: + description: + - The daily backup configuration for the instance. + required: false + type: dict + suboptions: + enabled: + description: + - Enable Autobackup for your instance. + required: false + type: bool + binary_log_enabled: + description: + - Whether binary log is enabled. If backup configuration is disabled, + binary log must be disabled as well. MySQL only. + required: false + type: bool + start_time: + description: + - Define the backup start time in UTC (HH:MM) . + required: false + type: str + user_labels: + description: + - User-provided labels, represented as a dictionary where each label is a + single key value pair. + required: false + type: dict + disk_encryption_configuration: + description: + - Disk encryption settings. 
+ required: false + type: dict + suboptions: + kms_key_name: + description: + - The KMS key used to encrypt the Cloud SQL instance . + required: false + type: str + disk_encryption_status: + description: + - Disk encryption status. + required: false + type: dict + suboptions: + kms_key_version_name: + description: + - The KMS key version used to encrypt the Cloud SQL instance . + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a instance + google.cloud.gcp_sql_instance: + name: "{{resource_name}}-2" + settings: + ip_configuration: + authorized_networks: + - name: google dns server + value: 8.8.8.8/32 + tier: db-n1-standard-1 + region: us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +backendType: + description: + - "* FIRST_GEN: First Generation instance. MySQL only." 
+ - "* SECOND_GEN: Second Generation instance or PostgreSQL instance." + - "* EXTERNAL: A database server that is not managed by Google." + returned: success + type: str +connectionName: + description: + - Connection name of the Cloud SQL instance used in connection strings. + returned: success + type: str +databaseVersion: + description: + - The database engine type and version. For First Generation instances, can be MYSQL_5_5, + or MYSQL_5_6. For Second Generation instances, can be MYSQL_5_6 or MYSQL_5_7. + Defaults to MYSQL_5_6. + - 'PostgreSQL instances: POSTGRES_9_6 The databaseVersion property can not be changed + after instance creation.' + returned: success + type: str +failoverReplica: + description: + - The name and status of the failover replica. This property is applicable only + to Second Generation instances. + returned: success + type: complex + contains: + available: + description: + - The availability status of the failover replica. A false status indicates + that the failover replica is out of sync. The master can only failover to + the failover replica when the status is true. + returned: success + type: bool + name: + description: + - The name of the failover replica. If specified at instance creation, a failover + replica is created for the instance. The name doesn't include the project + ID. This property is applicable only to Second Generation instances. + returned: success + type: str +instanceType: + description: + - The instance type. This can be one of the following. + - "* CLOUD_SQL_INSTANCE: A Cloud SQL instance that is not replicating from a master." + - "* ON_PREMISES_INSTANCE: An instance running on the customer's premises." + - "* READ_REPLICA_INSTANCE: A Cloud SQL instance configured as a read-replica." + returned: success + type: str +ipAddresses: + description: + - The assigned IP addresses for the instance. + returned: success + type: complex + contains: + ipAddress: + description: + - The IP address assigned. 
+ returned: success + type: str + timeToRetire: + description: + - The due time for this IP to be retired in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. + This field is only available when the IP is scheduled to be retired. + returned: success + type: str + type: + description: + - The type of this IP address. A PRIMARY address is an address that can accept + incoming connections. An OUTGOING address is the source address of connections + originating from the instance, if supported. + returned: success + type: str +ipv6Address: + description: + - The IPv6 address assigned to the instance. This property is applicable only to + First Generation instances. + returned: success + type: str +masterInstanceName: + description: + - The name of the instance which will act as master in the replication setup. + returned: success + type: str +maxDiskSize: + description: + - The maximum disk size of the instance in bytes. + returned: success + type: int +name: + description: + - Name of the Cloud SQL instance. This does not include the project ID. + returned: success + type: str +region: + description: + - The geographical region. Defaults to us-central or us-central1 depending on the + instance type (First Generation or Second Generation/PostgreSQL). + returned: success + type: str +replicaConfiguration: + description: + - Configuration specific to failover replicas and read replicas. + returned: success + type: complex + contains: + failoverTarget: + description: + - Specifies if the replica is the failover target. If the field is set to true + the replica will be designated as a failover replica. + - In case the master instance fails, the replica instance will be promoted as + the new master instance. + - Only one replica can be specified as failover target, and the replica has + to be in different zone with the master instance. 
+ returned: success + type: bool + mysqlReplicaConfiguration: + description: + - MySQL specific configuration when replicating from a MySQL on-premises master. + Replication configuration information such as the username, password, certificates, + and keys are not stored in the instance metadata. The configuration information + is used only to set up the replication connection and is stored by MySQL in + a file named master.info in the data directory. + returned: success + type: complex + contains: + caCertificate: + description: + - PEM representation of the trusted CA's x509 certificate. + returned: success + type: str + clientCertificate: + description: + - PEM representation of the replica's x509 certificate . + returned: success + type: str + clientKey: + description: + - PEM representation of the replica's private key. The corresponding public + key is encoded in the client's certificate. + returned: success + type: str + connectRetryInterval: + description: + - Seconds to wait between connect retries. MySQL's default is 60 seconds. + returned: success + type: int + dumpFilePath: + description: + - Path to a SQL dump file in Google Cloud Storage from which the replica + instance is to be created. The URI is in the form gs://bucketName/fileName. + Compressed gzip files (.gz) are also supported. Dumps should have the + binlog coordinates from which replication should begin. This can be accomplished + by setting --master-data to 1 when using mysqldump. + returned: success + type: str + masterHeartbeatPeriod: + description: + - Interval in milliseconds between replication heartbeats. + returned: success + type: int + password: + description: + - The password for the replication connection. + returned: success + type: str + sslCipher: + description: + - A list of permissible ciphers to use for SSL encryption. + returned: success + type: str + username: + description: + - The username for the replication connection. 
+ returned: success + type: str + verifyServerCertificate: + description: + - Whether or not to check the master's Common Name value in the certificate + that it sends during the SSL handshake. + returned: success + type: bool + replicaNames: + description: + - The replicas of the instance. + returned: success + type: list + serviceAccountEmailAddress: + description: + - The service account email address assigned to the instance. This property + is applicable only to Second Generation instances. + returned: success + type: str +settings: + description: + - The user settings. + returned: success + type: complex + contains: + databaseFlags: + description: + - The database flags passed to the instance at startup. + returned: success + type: complex + contains: + name: + description: + - The name of the flag. These flags are passed at instance startup, so include + both server options and system variables for MySQL. Flags should be specified + with underscores, not hyphens. + returned: success + type: str + value: + description: + - The value of the flag. Booleans should be set to on for true and off for + false. This field must be omitted if the flag doesn't take a value. + returned: success + type: str + ipConfiguration: + description: + - The settings for IP Management. This allows to enable or disable the instance + IP and manage which external networks can connect to the instance. The IPv4 + address cannot be disabled for Second Generation instances. + returned: success + type: complex + contains: + ipv4Enabled: + description: + - Whether the instance should be assigned an IP address or not. + returned: success + type: bool + authorizedNetworks: + description: + - The list of external networks that are allowed to connect to the instance + using the IP. In CIDR notation, also known as 'slash' notation (e.g. 192.168.100.0/24). 
+ returned: success + type: complex + contains: + expirationTime: + description: + - The time when this access control entry expires in RFC 3339 format, + for example 2012-11-15T16:19:00.094Z. + returned: success + type: str + name: + description: + - An optional label to identify this entry. + returned: success + type: str + value: + description: + - The whitelisted value for the access control list. For example, to + grant access to a client from an external IP (IPv4 or IPv6) address + or subnet, use that address or subnet here. + returned: success + type: str + requireSsl: + description: + - Whether the mysqld should default to 'REQUIRE X509' for users connecting + over IP. + returned: success + type: bool + tier: + description: + - The tier or machine type for this instance, for example db-n1-standard-1. + For MySQL instances, this field determines whether the instance is Second + Generation (recommended) or First Generation. + returned: success + type: str + availabilityType: + description: + - The availabilityType define if your postgres instance is run zonal or regional. + returned: success + type: str + backupConfiguration: + description: + - The daily backup configuration for the instance. + returned: success + type: complex + contains: + enabled: + description: + - Enable Autobackup for your instance. + returned: success + type: bool + binaryLogEnabled: + description: + - Whether binary log is enabled. If backup configuration is disabled, binary + log must be disabled as well. MySQL only. + returned: success + type: bool + startTime: + description: + - Define the backup start time in UTC (HH:MM) . + returned: success + type: str + settingsVersion: + description: + - The version of instance settings. This is a required field for update method + to make sure concurrent updates are handled properly. + - During update, use the most recent settingsVersion value for this instance + and do not try to update this value. 
+ returned: success + type: int + userLabels: + description: + - User-provided labels, represented as a dictionary where each label is a single + key value pair. + returned: success + type: dict +gceZone: + description: + - The Compute Engine zone that the instance is currently serving from. This value + could be different from the zone that was specified when the instance was created + if the instance has failed over to its secondary zone. + returned: success + type: str +state: + description: + - The current serving state of the database instance. + returned: success + type: str +diskEncryptionConfiguration: + description: + - Disk encryption settings. + returned: success + type: complex + contains: + kmsKeyName: + description: + - The KMS key used to encrypt the Cloud SQL instance . + returned: success + type: str +diskEncryptionStatus: + description: + - Disk encryption status. + returned: success + type: complex + contains: + kmsKeyVersionName: + description: + - The KMS key version used to encrypt the Cloud SQL instance . + returned: success + type: str +serverCaCert: + description: + - SSL configuration. + returned: success + type: complex + contains: + cert: + description: + - PEM representation of the X.509 certificate. + returned: success + type: str + certSerialNumber: + description: + - Serial number, as extracted from the certificate. + returned: success + type: str + commonName: + description: + - User supplied name. Constrained to [a-zA-Z.-_ ]+. + returned: success + type: str + createTime: + description: + - The time when the certificate was created in RFC 3339 format, for example + 2012-11-15T16:19:00.094Z. + returned: success + type: str + expirationTime: + description: + - The time when the certificate expires in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. + returned: success + type: str + sha1Fingerprint: + description: + - SHA-1 fingerprint of the certificate. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + backend_type=dict(type='str'), + connection_name=dict(type='str'), + database_version=dict(type='str'), + failover_replica=dict(type='dict', options=dict(name=dict(type='str'))), + instance_type=dict(type='str'), + ipv6_address=dict(type='str'), + master_instance_name=dict(type='str'), + max_disk_size=dict(type='int'), + name=dict(required=True, type='str'), + region=dict(type='str'), + replica_configuration=dict( + type='dict', + options=dict( + failover_target=dict(type='bool'), + mysql_replica_configuration=dict( + type='dict', + options=dict( + ca_certificate=dict(type='str'), + client_certificate=dict(type='str'), + client_key=dict(type='str'), + connect_retry_interval=dict(type='int'), + dump_file_path=dict(type='str'), + master_heartbeat_period=dict(type='int'), + password=dict(type='str'), + ssl_cipher=dict(type='str'), + username=dict(type='str'), + verify_server_certificate=dict(type='bool'), + ), + ), + replica_names=dict(type='list', elements='str'), + service_account_email_address=dict(type='str'), + ), + ), + settings=dict( + type='dict', + options=dict( + database_flags=dict(type='list', elements='dict', options=dict(name=dict(type='str'), value=dict(type='str'))), + ip_configuration=dict( + 
type='dict', + options=dict( + ipv4_enabled=dict(type='bool'), + private_network=dict(type='str'), + authorized_networks=dict( + type='list', elements='dict', options=dict(expiration_time=dict(type='str'), name=dict(type='str'), value=dict(type='str')) + ), + require_ssl=dict(type='bool'), + ), + ), + tier=dict(type='str'), + availability_type=dict(type='str'), + backup_configuration=dict( + type='dict', options=dict(enabled=dict(type='bool'), binary_log_enabled=dict(type='bool'), start_time=dict(type='str')) + ), + user_labels=dict(type='dict'), + ), + ), + disk_encryption_configuration=dict(type='dict', options=dict(kms_key_name=dict(type='str'))), + disk_encryption_status=dict(type='dict', options=dict(kms_key_version_name=dict(type='str'))), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + state = module.params['state'] + kind = 'sql#instance' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind, fetch) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind, fetch) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'sql') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind, fetch): + module.fail_json(msg="SQL objects can't be updated to ensure data safety") + + +def delete(module, link, kind, fetch): + auth = GcpSession(module, 'sql') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'sql#instance', + u'backendType': 
module.params.get('backend_type'), + u'connectionName': module.params.get('connection_name'), + u'databaseVersion': module.params.get('database_version'), + u'failoverReplica': InstanceFailoverreplica(module.params.get('failover_replica', {}), module).to_request(), + u'instanceType': module.params.get('instance_type'), + u'ipv6Address': module.params.get('ipv6_address'), + u'masterInstanceName': module.params.get('master_instance_name'), + u'maxDiskSize': module.params.get('max_disk_size'), + u'name': module.params.get('name'), + u'region': module.params.get('region'), + u'replicaConfiguration': InstanceReplicaconfiguration(module.params.get('replica_configuration', {}), module).to_request(), + u'settings': InstanceSettings(module.params.get('settings', {}), module).to_request(), + u'diskEncryptionConfiguration': InstanceDiskencryptionconfiguration(module.params.get('disk_encryption_configuration', {}), module).to_request(), + u'diskEncryptionStatus': InstanceDiskencryptionstatus(module.params.get('disk_encryption_status', {}), module).to_request(), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'sql') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances/{name}".format(**module.params) + + +def collection(module): + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + # SQL only: return on 403 if not exist + if allow_not_found and response.status_code == 403: + return None + + try: + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
def response_to_hash(module, response):
    """Map the camelCase API response onto the subset of fields this module
    manages so it can be diffed against resource_to_request() output."""
    return {
        u'backendType': response.get(u'backendType'),
        u'connectionName': response.get(u'connectionName'),
        u'databaseVersion': response.get(u'databaseVersion'),
        u'failoverReplica': InstanceFailoverreplica(response.get(u'failoverReplica', {}), module).from_response(),
        u'instanceType': response.get(u'instanceType'),
        u'ipAddresses': InstanceIpaddressesArray(response.get(u'ipAddresses', []), module).from_response(),
        u'ipv6Address': response.get(u'ipv6Address'),
        u'masterInstanceName': response.get(u'masterInstanceName'),
        u'maxDiskSize': response.get(u'maxDiskSize'),
        u'name': response.get(u'name'),
        u'region': response.get(u'region'),
        u'replicaConfiguration': InstanceReplicaconfiguration(response.get(u'replicaConfiguration', {}), module).from_response(),
        u'settings': InstanceSettings(response.get(u'settings', {}), module).from_response(),
        u'gceZone': response.get(u'gceZone'),
        u'state': response.get(u'state'),
        u'diskEncryptionConfiguration': InstanceDiskencryptionconfiguration(response.get(u'diskEncryptionConfiguration', {}), module).from_response(),
        u'diskEncryptionStatus': InstanceDiskencryptionstatus(response.get(u'diskEncryptionStatus', {}), module).from_response(),
        u'serverCaCert': InstanceServercacert(response.get(u'serverCaCert', {}), module).from_response(),
    }


def async_op_url(module, extra_data=None):
    """Build the operation-polling URL from module params plus extra_data
    (expected to carry 'op_id'; extra_data values are overridden by params)."""
    if extra_data is None:
        extra_data = {}
    url = "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    """Block until the async SQL operation in `response` completes, then
    re-fetch and return the operation's target resource (or {} if the
    operation response itself was empty/not found)."""
    op_result = return_if_object(module, response, 'sql#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'sql#instance')


def wait_for_completion(status, op_result, module):
    """Poll the operation once a second until its status reaches DONE,
    failing the module if the operation reports errors along the way.
    NOTE(review): there is no overall timeout — a stuck operation polls forever."""
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'sql#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result


def raise_if_errors(response, err_path, module):
    """Fail the module if the operation payload carries errors at err_path."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


class InstanceFailoverreplica(object):
    """Maps the failover_replica field between Ansible (snake_case) and the
    API (camelCase); unset values are stripped by remove_nones_from_dict."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'name': self.request.get('name')})

    def from_response(self):
        return remove_nones_from_dict({u'name': self.request.get(u'name')})


class InstanceIpaddressesArray(object):
    """Maps the ipAddresses list (output-only) item-by-item between Ansible
    snake_case dicts and API camelCase dicts."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'ipAddress': item.get('ip_address'), u'timeToRetire': item.get('time_to_retire'), u'type': item.get('type')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'ipAddress': item.get(u'ipAddress'), u'timeToRetire': item.get(u'timeToRetire'), u'type': item.get(u'type')})


class InstanceReplicaconfiguration(object):
    """Maps replica_configuration, delegating the nested MySQL block to
    InstanceMysqlreplicaconfiguration."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'failoverTarget': self.request.get('failover_target'),
                u'mysqlReplicaConfiguration': InstanceMysqlreplicaconfiguration(self.request.get('mysql_replica_configuration', {}), self.module).to_request(),
                u'replicaNames': self.request.get('replica_names'),
                u'serviceAccountEmailAddress': self.request.get('service_account_email_address'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'failoverTarget': self.request.get(u'failoverTarget'),
                u'mysqlReplicaConfiguration': InstanceMysqlreplicaconfiguration(
                    self.request.get(u'mysqlReplicaConfiguration', {}), self.module
                ).from_response(),
                u'replicaNames': self.request.get(u'replicaNames'),
                u'serviceAccountEmailAddress': self.request.get(u'serviceAccountEmailAddress'),
            }
        )


class InstanceMysqlreplicaconfiguration(object):
    """Maps mysql_replica_configuration (on-premises replication settings,
    including credentials passed through to the API)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'caCertificate': self.request.get('ca_certificate'),
                u'clientCertificate': self.request.get('client_certificate'),
                u'clientKey': self.request.get('client_key'),
                u'connectRetryInterval': self.request.get('connect_retry_interval'),
                u'dumpFilePath': self.request.get('dump_file_path'),
                u'masterHeartbeatPeriod': self.request.get('master_heartbeat_period'),
                u'password': self.request.get('password'),
                u'sslCipher': self.request.get('ssl_cipher'),
                u'username': self.request.get('username'),
                u'verifyServerCertificate': self.request.get('verify_server_certificate'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'caCertificate': self.request.get(u'caCertificate'),
                u'clientCertificate': self.request.get(u'clientCertificate'),
                u'clientKey': self.request.get(u'clientKey'),
                u'connectRetryInterval': self.request.get(u'connectRetryInterval'),
                u'dumpFilePath': self.request.get(u'dumpFilePath'),
                u'masterHeartbeatPeriod': self.request.get(u'masterHeartbeatPeriod'),
                u'password': self.request.get(u'password'),
                u'sslCipher': self.request.get(u'sslCipher'),
                u'username': self.request.get(u'username'),
                u'verifyServerCertificate': self.request.get(u'verifyServerCertificate'),
            }
        )


class InstanceSettings(object):
    """Maps the settings block, delegating nested structures to their
    dedicated mapper classes."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'databaseFlags': InstanceDatabaseflagsArray(self.request.get('database_flags', []), self.module).to_request(),
                u'ipConfiguration': InstanceIpconfiguration(self.request.get('ip_configuration', {}), self.module).to_request(),
                u'tier': self.request.get('tier'),
                u'availabilityType': self.request.get('availability_type'),
                u'backupConfiguration': InstanceBackupconfiguration(self.request.get('backup_configuration', {}), self.module).to_request(),
                u'userLabels': self.request.get('user_labels'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'databaseFlags': InstanceDatabaseflagsArray(self.request.get(u'databaseFlags', []), self.module).from_response(),
                u'ipConfiguration': InstanceIpconfiguration(self.request.get(u'ipConfiguration', {}), self.module).from_response(),
                u'tier': self.request.get(u'tier'),
                u'availabilityType': self.request.get(u'availabilityType'),
                u'backupConfiguration': InstanceBackupconfiguration(self.request.get(u'backupConfiguration', {}), self.module).from_response(),
                u'userLabels': self.request.get(u'userLabels'),
            }
        )


class InstanceDatabaseflagsArray(object):
    """Maps the database_flags list of name/value pairs item-by-item."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'name': item.get('name'), u'value': item.get('value')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'name': item.get(u'name'), u'value': item.get(u'value')})


class InstanceIpconfiguration(object):
    """Maps the ip_configuration block, delegating authorized_networks to
    InstanceAuthorizednetworksArray."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'ipv4Enabled': self.request.get('ipv4_enabled'),
                u'privateNetwork': self.request.get('private_network'),
                u'authorizedNetworks': InstanceAuthorizednetworksArray(self.request.get('authorized_networks', []), self.module).to_request(),
                u'requireSsl': self.request.get('require_ssl'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'ipv4Enabled': self.request.get(u'ipv4Enabled'),
                u'privateNetwork': self.request.get(u'privateNetwork'),
                u'authorizedNetworks': InstanceAuthorizednetworksArray(self.request.get(u'authorizedNetworks', []), self.module).from_response(),
                u'requireSsl': self.request.get(u'requireSsl'),
            }
        )


class InstanceAuthorizednetworksArray(object):
    """Maps the authorized_networks list item-by-item."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'expirationTime': item.get('expiration_time'), u'name': item.get('name'), u'value': item.get('value')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'expirationTime': item.get(u'expirationTime'), u'name': item.get(u'name'), u'value': item.get(u'value')})


class InstanceBackupconfiguration(object):
    """Maps the backup_configuration block (enabled/binary log/start time)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {u'enabled': self.request.get('enabled'), u'binaryLogEnabled': self.request.get('binary_log_enabled'), u'startTime': self.request.get('start_time')}
        )

    def from_response(self):
        return remove_nones_from_dict(
            {u'enabled': self.request.get(u'enabled'), u'binaryLogEnabled': self.request.get(u'binaryLogEnabled'), u'startTime': self.request.get(u'startTime')}
        )


class InstanceDiskencryptionconfiguration(object):
    """Maps the disk_encryption_configuration block (KMS key name)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'kmsKeyName': self.request.get('kms_key_name')})

    def from_response(self):
        return remove_nones_from_dict({u'kmsKeyName': self.request.get(u'kmsKeyName')})


class InstanceDiskencryptionstatus(object):
    """Maps the disk_encryption_status block (KMS key version name)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict({u'kmsKeyVersionName': self.request.get('kms_key_version_name')})

    def from_response(self):
        return remove_nones_from_dict({u'kmsKeyVersionName': self.request.get(u'kmsKeyVersionName')})


class InstanceServercacert(object):
    """Maps the serverCaCert block (output-only SSL server CA certificate)."""

    def __init__(self, request, module):
        self.module = module
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        return remove_nones_from_dict(
            {
                u'cert': self.request.get('cert'),
                u'certSerialNumber': self.request.get('cert_serial_number'),
                u'commonName': self.request.get('common_name'),
                u'createTime': self.request.get('create_time'),
                u'expirationTime': self.request.get('expiration_time'),
                u'sha1Fingerprint': self.request.get('sha1_fingerprint'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'cert': self.request.get(u'cert'),
                u'certSerialNumber': self.request.get(u'certSerialNumber'),
                u'commonName': self.request.get(u'commonName'),
                u'createTime': self.request.get(u'createTime'),
                u'expirationTime': self.request.get(u'expirationTime'),
                u'sha1Fingerprint': self.request.get(u'sha1Fingerprint'),
            }
        )


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_sql_instance_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_sql_instance_info.py
new file mode 100644
index 000000000..71d09d20e
--- /dev/null
+++ b/ansible_collections/google/cloud/plugins/modules/gcp_sql_instance_info.py
@@ -0,0 +1,555 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_instance_info +description: +- Gather info for GCP Instance +short_description: Gather info for GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance + gcp_sql_instance_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + backendType: + description: + - "* FIRST_GEN: First Generation instance. MySQL only." + - "* SECOND_GEN: Second Generation instance or PostgreSQL instance." + - "* EXTERNAL: A database server that is not managed by Google." + returned: success + type: str + connectionName: + description: + - Connection name of the Cloud SQL instance used in connection strings. + returned: success + type: str + databaseVersion: + description: + - The database engine type and version. For First Generation instances, can + be MYSQL_5_5, or MYSQL_5_6. For Second Generation instances, can be MYSQL_5_6 + or MYSQL_5_7. Defaults to MYSQL_5_6. + - 'PostgreSQL instances: POSTGRES_9_6 The databaseVersion property can not be + changed after instance creation.' + returned: success + type: str + failoverReplica: + description: + - The name and status of the failover replica. This property is applicable only + to Second Generation instances. 
+ returned: success + type: complex + contains: + available: + description: + - The availability status of the failover replica. A false status indicates + that the failover replica is out of sync. The master can only failover + to the failover replica when the status is true. + returned: success + type: bool + name: + description: + - The name of the failover replica. If specified at instance creation, a + failover replica is created for the instance. The name doesn't include + the project ID. This property is applicable only to Second Generation + instances. + returned: success + type: str + instanceType: + description: + - The instance type. This can be one of the following. + - "* CLOUD_SQL_INSTANCE: A Cloud SQL instance that is not replicating from a + master." + - "* ON_PREMISES_INSTANCE: An instance running on the customer's premises." + - "* READ_REPLICA_INSTANCE: A Cloud SQL instance configured as a read-replica." + returned: success + type: str + ipAddresses: + description: + - The assigned IP addresses for the instance. + returned: success + type: complex + contains: + ipAddress: + description: + - The IP address assigned. + returned: success + type: str + timeToRetire: + description: + - The due time for this IP to be retired in RFC 3339 format, for example + 2012-11-15T16:19:00.094Z. This field is only available when the IP is + scheduled to be retired. + returned: success + type: str + type: + description: + - The type of this IP address. A PRIMARY address is an address that can + accept incoming connections. An OUTGOING address is the source address + of connections originating from the instance, if supported. + returned: success + type: str + ipv6Address: + description: + - The IPv6 address assigned to the instance. This property is applicable only + to First Generation instances. + returned: success + type: str + masterInstanceName: + description: + - The name of the instance which will act as master in the replication setup. 
+ returned: success + type: str + maxDiskSize: + description: + - The maximum disk size of the instance in bytes. + returned: success + type: int + name: + description: + - Name of the Cloud SQL instance. This does not include the project ID. + returned: success + type: str + region: + description: + - The geographical region. Defaults to us-central or us-central1 depending on + the instance type (First Generation or Second Generation/PostgreSQL). + returned: success + type: str + replicaConfiguration: + description: + - Configuration specific to failover replicas and read replicas. + returned: success + type: complex + contains: + failoverTarget: + description: + - Specifies if the replica is the failover target. If the field is set to + true the replica will be designated as a failover replica. + - In case the master instance fails, the replica instance will be promoted + as the new master instance. + - Only one replica can be specified as failover target, and the replica + has to be in different zone with the master instance. + returned: success + type: bool + mysqlReplicaConfiguration: + description: + - MySQL specific configuration when replicating from a MySQL on-premises + master. Replication configuration information such as the username, password, + certificates, and keys are not stored in the instance metadata. The configuration + information is used only to set up the replication connection and is stored + by MySQL in a file named master.info in the data directory. + returned: success + type: complex + contains: + caCertificate: + description: + - PEM representation of the trusted CA's x509 certificate. + returned: success + type: str + clientCertificate: + description: + - PEM representation of the replica's x509 certificate . + returned: success + type: str + clientKey: + description: + - PEM representation of the replica's private key. The corresponding + public key is encoded in the client's certificate. 
+ returned: success + type: str + connectRetryInterval: + description: + - Seconds to wait between connect retries. MySQL's default is 60 seconds. + returned: success + type: int + dumpFilePath: + description: + - Path to a SQL dump file in Google Cloud Storage from which the replica + instance is to be created. The URI is in the form gs://bucketName/fileName. + Compressed gzip files (.gz) are also supported. Dumps should have + the binlog coordinates from which replication should begin. This can + be accomplished by setting --master-data to 1 when using mysqldump. + returned: success + type: str + masterHeartbeatPeriod: + description: + - Interval in milliseconds between replication heartbeats. + returned: success + type: int + password: + description: + - The password for the replication connection. + returned: success + type: str + sslCipher: + description: + - A list of permissible ciphers to use for SSL encryption. + returned: success + type: str + username: + description: + - The username for the replication connection. + returned: success + type: str + verifyServerCertificate: + description: + - Whether or not to check the master's Common Name value in the certificate + that it sends during the SSL handshake. + returned: success + type: bool + replicaNames: + description: + - The replicas of the instance. + returned: success + type: list + serviceAccountEmailAddress: + description: + - The service account email address assigned to the instance. This property + is applicable only to Second Generation instances. + returned: success + type: str + settings: + description: + - The user settings. + returned: success + type: complex + contains: + databaseFlags: + description: + - The database flags passed to the instance at startup. + returned: success + type: complex + contains: + name: + description: + - The name of the flag. These flags are passed at instance startup, + so include both server options and system variables for MySQL. 
Flags + should be specified with underscores, not hyphens. + returned: success + type: str + value: + description: + - The value of the flag. Booleans should be set to on for true and off + for false. This field must be omitted if the flag doesn't take a value. + returned: success + type: str + ipConfiguration: + description: + - The settings for IP Management. This allows to enable or disable the instance + IP and manage which external networks can connect to the instance. The + IPv4 address cannot be disabled for Second Generation instances. + returned: success + type: complex + contains: + ipv4Enabled: + description: + - Whether the instance should be assigned an IP address or not. + returned: success + type: bool + privateNetwork: + description: + - The resource link for the VPC network from which the Cloud SQL instance is accessible for private IP + (e.g /projects/myProject/global/networks/default) + returned: success + type: str + authorizedNetworks: + description: + - The list of external networks that are allowed to connect to the instance + using the IP. In CIDR notation, also known as 'slash' notation (e.g. + 192.168.100.0/24). + returned: success + type: complex + contains: + expirationTime: + description: + - The time when this access control entry expires in RFC 3339 format, + for example 2012-11-15T16:19:00.094Z. + returned: success + type: str + name: + description: + - An optional label to identify this entry. + returned: success + type: str + value: + description: + - The whitelisted value for the access control list. For example, + to grant access to a client from an external IP (IPv4 or IPv6) + address or subnet, use that address or subnet here. + returned: success + type: str + requireSsl: + description: + - Whether the mysqld should default to 'REQUIRE X509' for users connecting + over IP. + returned: success + type: bool + tier: + description: + - The tier or machine type for this instance, for example db-n1-standard-1. 
+ For MySQL instances, this field determines whether the instance is Second + Generation (recommended) or First Generation. + returned: success + type: str + availabilityType: + description: + - The availabilityType define if your postgres instance is run zonal or + regional. + returned: success + type: str + backupConfiguration: + description: + - The daily backup configuration for the instance. + returned: success + type: complex + contains: + enabled: + description: + - Enable Autobackup for your instance. + returned: success + type: bool + binaryLogEnabled: + description: + - Whether binary log is enabled. If backup configuration is disabled, + binary log must be disabled as well. MySQL only. + returned: success + type: bool + startTime: + description: + - Define the backup start time in UTC (HH:MM) . + returned: success + type: str + settingsVersion: + description: + - The version of instance settings. This is a required field for update + method to make sure concurrent updates are handled properly. + - During update, use the most recent settingsVersion value for this instance + and do not try to update this value. + returned: success + type: int + userLabels: + description: + - User-provided labels, represented as a dictionary where each label is + a single key value pair. + returned: success + type: dict + gceZone: + description: + - The Compute Engine zone that the instance is currently serving from. This + value could be different from the zone that was specified when the instance + was created if the instance has failed over to its secondary zone. + returned: success + type: str + state: + description: + - The current serving state of the database instance. + returned: success + type: str + diskEncryptionConfiguration: + description: + - Disk encryption settings. + returned: success + type: complex + contains: + kmsKeyName: + description: + - The KMS key used to encrypt the Cloud SQL instance . 
+ returned: success + type: str + diskEncryptionStatus: + description: + - Disk encryption status. + returned: success + type: complex + contains: + kmsKeyVersionName: + description: + - The KMS key version used to encrypt the Cloud SQL instance . + returned: success + type: str + serverCaCert: + description: + - SSL configuration. + returned: success + type: complex + contains: + cert: + description: + - PEM representation of the X.509 certificate. + returned: success + type: str + certSerialNumber: + description: + - Serial number, as extracted from the certificate. + returned: success + type: str + commonName: + description: + - User supplied name. Constrained to [a-zA-Z.-_ ]+. + returned: success + type: str + createTime: + description: + - The time when the certificate was created in RFC 3339 format, for example + 2012-11-15T16:19:00.094Z. + returned: success + type: str + expirationTime: + description: + - The time when the certificate expires in RFC 3339 format, for example + 2012-11-15T16:19:00.094Z. + returned: success + type: str + sha1Fingerprint: + description: + - SHA-1 fingerprint of the certificate. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'sql') + return auth.list(link, return_if_object, array_name='items') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_sql_ssl_cert.py b/ansible_collections/google/cloud/plugins/modules/gcp_sql_ssl_cert.py new file mode 100644 index 000000000..96e9cc1b9 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_sql_ssl_cert.py @@ -0,0 +1,392 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_ssl_cert +description: +- Represents an SSL certificate created for a Cloud SQL instance. 
To use the SSL certificate + you must have the SSL Client Certificate and the associated SSL Client Key. The + Client Key can be downloaded only when the SSL certificate is created with the insert + method. +short_description: Creates a GCP SslCert +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + cert: + description: + - PEM representation of the X.509 certificate. + required: false + type: str + cert_serial_number: + description: + - Serial number, as extracted from the certificate. + required: false + type: str + common_name: + description: + - User supplied name. Constrained to [a-zA-Z.-_ ]+. + required: false + type: str + create_time: + description: + - The time when the certificate was created in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. + required: false + type: str + expiration_time: + description: + - The time when the certificate expires in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. + required: false + type: str + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + - 'This field represents a link to a Instance resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_sql_instance task and then set this instance field to "{{ name-of-resource + }}"' + required: true + type: dict + sha1_fingerprint: + description: + - The SHA-1 of the certificate. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. 
+ type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a instance + google.cloud.gcp_sql_instance: + name: "{{resource_name}}-2" + settings: + ip_configuration: + authorized_networks: + - name: google dns server + value: 8.8.8.8/32 + tier: db-n1-standard-1 + region: us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instance + +- name: create a SSL cert + google.cloud.gcp_sql_ssl_cert: + common_name: "{{resource_name}}" + instance: "{{instance['name'}}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +cert: + description: + - PEM representation of the X.509 certificate. + returned: success + type: str +certSerialNumber: + description: + - Serial number, as extracted from the certificate. + returned: success + type: str +commonName: + description: + - User supplied name. Constrained to [a-zA-Z.-_ ]+. 
+ returned: success + type: str +createTime: + description: + - The time when the certificate was created in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. + returned: success + type: str +expirationTime: + description: + - The time when the certificate expires in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. + returned: success + type: str +instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + returned: success + type: dict +sha1Fingerprint: + description: + - The SHA-1 of the certificate. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + cert=dict(type='str'), + cert_serial_number=dict(type='str'), + common_name=dict(type='str'), + create_time=dict(type='str'), + expiration_time=dict(type='str'), + instance=dict(required=True, type='dict'), + sha1_fingerprint=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + state = module.params['state'] + kind = 'sql#sslCert' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = 
True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'sql') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'sql') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'sql') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'kind': 'sql#sslCert', + u'cert': module.params.get('cert'), + u'certSerialNumber': module.params.get('cert_serial_number'), + u'commonName': module.params.get('common_name'), + u'createTime': module.params.get('create_time'), + u'expirationTime': module.params.get('expiration_time'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'sql') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')} + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/sslCerts/{sha1_fingerprint}".format(**res) + + +def collection(module): + res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')} + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/sslCerts".format(**res) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. 
+ if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'cert': response.get(u'cert'), + u'certSerialNumber': response.get(u'certSerialNumber'), + u'commonName': response.get(u'commonName'), + u'createTime': response.get(u'createTime'), + u'expirationTime': response.get(u'expirationTime'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'sql#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_done = wait_for_completion(status, op_result, module) + return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'sql#sslCert') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'sql#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_sql_user.py b/ansible_collections/google/cloud/plugins/modules/gcp_sql_user.py new file mode 100644 index 000000000..d7e211a83 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_sql_user.py @@ -0,0 +1,392 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_user +description: +- The Users resource represents a database user in a Cloud SQL instance. +short_description: Creates a GCP User +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + host: + description: + - The host name from which the user can connect. For insert operations, host defaults + to an empty string. For update operations, host is specified as part of the + request URL. The host name cannot be updated after insertion. + required: true + type: str + name: + description: + - The name of the user in the Cloud SQL instance. + required: true + type: str + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + - 'This field represents a link to a Instance resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_sql_instance task and then set this instance field to "{{ name-of-resource + }}"' + required: true + type: dict + password: + description: + - The password for the user. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +''' + +EXAMPLES = ''' +- name: create a instance + google.cloud.gcp_sql_instance: + name: "{{resource_name}}-1" + settings: + ip_configuration: + authorized_networks: + - name: google dns server + value: 8.8.8.8/32 + tier: db-n1-standard-1 + region: us-central1 + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: instance + +- name: create a user + google.cloud.gcp_sql_user: + name: test-user + host: 10.1.2.3 + password: secret-password + instance: "{{ instance }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +host: + description: + - The host name from which the user can connect. For insert operations, host defaults + to an empty string. For update operations, host is specified as part of the request + URL. The host name cannot be updated after insertion. + returned: success + type: str +name: + description: + - The name of the user in the Cloud SQL instance. + returned: success + type: str +instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + returned: success + type: dict +password: + description: + - The password for the user. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + host=dict(required=True, type='str'), + name=dict(required=True, type='str'), + instance=dict(required=True, type='dict'), + password=dict(type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + state = module.params['state'] + kind = 'sql#user' + + fetch = fetch_wrapped_resource(module, 'sql#user', 'sql#usersList', 'items') + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'sql') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, kind): + auth = GcpSession(module, 'sql') + return wait_for_operation(module, auth.put(link, resource_to_request(module))) + + +def delete(module, link, kind): + auth = GcpSession(module, 'sql') 
+ return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = {u'kind': 'sql#user', u'password': module.params.get('password'), u'host': module.params.get('host'), u'name': module.params.get('name')} + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def unwrap_resource_filter(module): + return {'name': module.params['name'], 'host': module.params['host']} + + +def unwrap_resource(result, module): + query_predicate = unwrap_resource_filter(module) + matched_items = [] + for item in result: + if all(item[k] == query_predicate[k] for k in query_predicate.keys()): + matched_items.append(item) + if len(matched_items) > 1: + module.fail_json(msg="More than 1 result found: %s" % matched_items) + + if matched_items: + return matched_items[0] + else: + return None + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'sql') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def fetch_wrapped_resource(module, kind, wrap_kind, wrap_path): + result = fetch_resource(module, self_link(module), wrap_kind) + if result is None or wrap_path not in result: + return None + + result = unwrap_resource(result[wrap_path], module) + + if result is None: + return None + + if result['kind'] != kind: + module.fail_json(msg="Incorrect result: {kind}".format(**result)) + + return result + + +def self_link(module): + res = { + 'project': module.params['project'], + 'instance': replace_resource_dict(module.params['instance'], 'name'), + 'name': module.params['name'], + 'host': module.params['host'], + } + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/users?name={name}&host={host}".format(**res) + + +def collection(module): + res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')} + return 
"https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/users".format(**res) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + # SQL only: return on 403 if not exist + if allow_not_found and response.status_code == 403: + return None + + try: + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return {u'host': response.get(u'host'), u'name': response.get(u'name')} + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/operations/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response, 'sql#operation') + if op_result is None: + return {} + status = navigate_hash(op_result, ['status']) + wait_for_completion(status, op_result, module) + return fetch_wrapped_resource(module, 'sql#user', 'sql#usersList', 'items') + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while status != 'DONE': + raise_if_errors(op_result, ['error', 'errors'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, 'sql#operation', False) + status = navigate_hash(op_result, ['status']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_sql_user_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_sql_user_info.py new file mode 100644 index 000000000..dfb3e08b1 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_sql_user_info.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# 
+# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_user_info +description: +- Gather info for GCP User +short_description: Gather info for GCP User +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + - 'This field represents a link to a Instance resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_sql_instance task and then set this instance field to "{{ name-of-resource + }}"' + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a user + gcp_sql_user_info: + instance: "{{ instance }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + host: + description: + - The host name from which the user can connect. For insert operations, host + defaults to an empty string. For update operations, host is specified as part + of the request URL. The host name cannot be updated after insertion. + returned: success + type: str + name: + description: + - The name of the user in the Cloud SQL instance. + returned: success + type: str + instance: + description: + - The name of the Cloud SQL instance. 
This does not include the project ID. + returned: success + type: dict + password: + description: + - The password for the user. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(instance=dict(required=True, type='dict'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')} + return "https://sqladmin.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/users".format(**res) + + +def fetch_list(module, link): + auth = GcpSession(module, 'sql') + return auth.list(link, return_if_object, array_name='items') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_storage_bucket.py b/ansible_collections/google/cloud/plugins/modules/gcp_storage_bucket.py new file mode 100644 index 000000000..f9bad465d --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_storage_bucket.py @@ -0,0 +1,1428 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_storage_bucket +description: +- The Buckets resource represents a bucket in Google Cloud Storage. 
There is a single + global namespace shared by all buckets. For more information, see Bucket Name Requirements. +- Buckets contain objects which can be accessed by their own methods. In addition + to the acl property, buckets contain bucketAccessControls, for use in fine-grained + manipulation of an existing bucket's access controls. +- A bucket is always owned by the project team owners group. +short_description: Creates a GCP Bucket +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + acl: + description: + - Access controls on the bucket. + elements: dict + required: false + type: list + suboptions: + bucket: + description: + - The name of the bucket. + - 'This field represents a link to a Bucket resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value + of your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_storage_bucket task and then set this bucket field to "{{ name-of-resource + }}"' + required: true + type: dict + entity: + description: + - 'The entity holding the permission, in one of the following forms: user-userId + user-email group-groupId group-email domain-domain project-team-projectId + allUsers allAuthenticatedUsers Examples: The user liz@example.com would + be user-liz@example.com.' + - The group example@googlegroups.com would be group-example@googlegroups.com. + - To refer to all members of the Google Apps for Business domain example.com, + the entity would be domain-example.com. + required: true + type: str + entity_id: + description: + - The ID for the entity. + required: false + type: str + project_team: + description: + - The project team associated with the entity. 
+ required: false + type: dict + suboptions: + project_number: + description: + - The project team associated with the entity. + required: false + type: str + team: + description: + - The team. + - 'Some valid choices include: "editors", "owners", "viewers"' + required: false + type: str + role: + description: + - The access permission for the entity. + - 'Some valid choices include: "OWNER", "READER", "WRITER"' + required: false + type: str + cors: + description: + - The bucket's Cross-Origin Resource Sharing (CORS) configuration. + elements: dict + required: false + type: list + suboptions: + max_age_seconds: + description: + - The value, in seconds, to return in the Access-Control-Max-Age header used + in preflight responses. + required: false + type: int + method: + description: + - 'The list of HTTP methods on which to include CORS response headers, (GET, + OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means + "any method".' + elements: str + required: false + type: list + origin: + description: + - The list of Origins eligible to receive CORS response headers. + - 'Note: "*" is permitted in the list of origins, and means "any Origin".' + elements: str + required: false + type: list + response_header: + description: + - The list of HTTP headers other than the simple response headers to give + permission for the user-agent to share across domains. + elements: str + required: false + type: list + default_event_based_hold: + description: + - Whether or not to automatically apply an eventBasedHold to new objects added + to the bucket. + required: false + type: bool + default_object_acl: + description: + - Default access controls to apply to new objects when no ACL is provided. + elements: dict + required: false + type: list + suboptions: + bucket: + description: + - The name of the bucket. + - 'This field represents a link to a Bucket resource in GCP. It can be specified + in two ways. 
First, you can place a dictionary with key ''name'' and value + of your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_storage_bucket task and then set this bucket field to "{{ name-of-resource + }}"' + required: true + type: dict + entity: + description: + - 'The entity holding the permission, in one of the following forms: * user-{{userId}} + * user-{{email}} (such as "user-liz@example.com") * group-{{groupId}} * + group-{{email}} (such as "group-example@googlegroups.com") * domain-{{domain}} + (such as "domain-example.com") * project-team-{{projectId}} * allUsers * + allAuthenticatedUsers .' + required: true + type: str + object: + description: + - The name of the object, if applied to an object. + required: false + type: str + role: + description: + - The access permission for the entity. + - 'Some valid choices include: "OWNER", "READER"' + required: true + type: str + lifecycle: + description: + - The bucket's lifecycle configuration. + - See U(https://developers.google.com/storage/docs/lifecycle) for more information. + required: false + type: dict + suboptions: + rule: + description: + - A lifecycle management rule, which is made of an action to take and the + condition(s) under which the action will be taken. + elements: dict + required: false + type: list + suboptions: + action: + description: + - The action to take. + required: false + type: dict + suboptions: + storage_class: + description: + - Target storage class. Required iff the type of the action is SetStorageClass. + required: false + type: str + type: + description: + - Type of the action. Currently, only Delete and SetStorageClass are + supported. + - 'Some valid choices include: "Delete", "SetStorageClass"' + required: false + type: str + condition: + description: + - The condition(s) under which the action will be taken. + required: false + type: dict + suboptions: + age_days: + description: + - Age of an object (in days). 
This condition is satisfied when an + object reaches the specified age. + required: false + type: int + created_before: + description: + - A date in RFC 3339 format with only the date part (for instance, + "2013-01-15"). This condition is satisfied when an object is created + before midnight of the specified date in UTC. + required: false + type: str + custom_time_before: + description: + - A date in the RFC 3339 format YYYY-MM-DD. This condition is satisfied + when the customTime metadata for the object is set to an earlier + date than the date used in this lifecycle condition. + required: false + type: str + days_since_custom_time: + description: + - Days since the date set in the customTime metadata for the object. + This condition is satisfied when the current date and time is at + least the specified number of days after the customTime. + required: false + type: int + days_since_noncurrent_time: + description: + - Relevant only for versioned objects. This condition is satisfied + when an object has been noncurrent for more than the specified number + of days. + required: false + type: int + is_live: + description: + - Relevant only for versioned objects. If the value is true, this + condition matches live objects; if the value is false, it matches + archived objects. + required: false + type: bool + matches_storage_class: + description: + - Objects having any of the storage classes specified by this condition + will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, + COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY. + elements: str + required: false + type: list + noncurrent_time_before: + description: + - Relevant only for versioned objects. A date in the RFC 3339 format + YYYY-MM-DD. This condition is satisfied for objects that became + noncurrent on a date prior to the one specified in this condition. + required: false + type: str + num_newer_versions: + description: + - Relevant only for versioned objects. 
If the value is N, this condition + is satisfied when there are at least N versions (including the live + version) newer than this version of the object. + required: false + type: int + location: + description: + - The location of the bucket. Object data for objects in the bucket resides in + physical storage within this region. Defaults to US. See the developer's guide + for the authoritative list. + required: false + type: str + logging: + description: + - The bucket's logging configuration, which defines the destination bucket and + optional name prefix for the current bucket's logs. + required: false + type: dict + suboptions: + log_bucket: + description: + - The destination bucket where the current bucket's logs should be placed. + required: false + type: str + log_object_prefix: + description: + - A prefix for log object names. + required: false + type: str + metageneration: + description: + - The metadata generation of this bucket. + required: false + type: int + name: + description: + - The name of the bucket. + required: false + type: str + owner: + description: + - The owner of the bucket. This is always the project team's owner group. + required: false + type: dict + suboptions: + entity: + description: + - The entity, in the form project-owner-projectId. + required: false + type: str + storage_class: + description: + - The bucket's default storage class, used whenever no storageClass is specified + for a newly-created object. This defines how objects in the bucket are stored + and determines the SLA and the cost of storage. + - Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, + and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket + is created, it will default to STANDARD. For more information, see storage classes. 
+ - 'Some valid choices include: "MULTI_REGIONAL", "REGIONAL", "STANDARD", "NEARLINE", + "COLDLINE", "ARCHIVE", "DURABLE_REDUCED_AVAILABILITY"' + required: false + type: str + versioning: + description: + - The bucket's versioning configuration. + required: false + type: dict + suboptions: + enabled: + description: + - While set to true, versioning is fully enabled for this bucket. + required: false + type: bool + website: + description: + - The bucket's website configuration, controlling how the service behaves when + accessing bucket contents as a web site. See the Static Website Examples for + more information. + required: false + type: dict + suboptions: + main_page_suffix: + description: + - If the requested object path is missing, the service will ensure the path + has a trailing '/', append this suffix, and attempt to retrieve the resulting + object. This allows the creation of index.html objects to represent directory + pages. + required: false + type: str + not_found_page: + description: + - If the requested object path is missing, and any mainPageSuffix object is + missing, if applicable, the service will return the named object from this + bucket as the content for a 404 Not Found result. + required: false + type: str + labels: + description: + - Labels applied to this bucket. A list of key->value pairs. + required: false + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + predefined_default_object_acl: + description: + - Apply a predefined set of default object access controls to this bucket. + - 'Acceptable values are: - "authenticatedRead": Object owner gets OWNER access, + and allAuthenticatedUsers get READER access.' + - '- "bucketOwnerFullControl": Object owner gets OWNER access, and project team + owners get OWNER access.' + - '- "bucketOwnerRead": Object owner gets OWNER access, and project team owners + get READER access.' + - '- "private": Object owner gets OWNER access.' 
+ - '- "projectPrivate": Object owner gets OWNER access, and project team members + get access according to their roles.' + - '- "publicRead": Object owner gets OWNER access, and allUsers get READER access.' + - 'Some valid choices include: "authenticatedRead", "bucketOwnerFullControl", + "bucketOwnerRead", "private", "projectPrivate", "publicRead"' + required: false + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +''' + +EXAMPLES = ''' +- name: create a bucket + google.cloud.gcp_storage_bucket: + name: ansible-storage-module + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +acl: + description: + - Access controls on the bucket. + returned: success + type: complex + contains: + bucket: + description: + - The name of the bucket. + returned: success + type: dict + domain: + description: + - The domain associated with the entity. + returned: success + type: str + email: + description: + - The email address associated with the entity. 
+ returned: success + type: str + entity: + description: + - 'The entity holding the permission, in one of the following forms: user-userId + user-email group-groupId group-email domain-domain project-team-projectId + allUsers allAuthenticatedUsers Examples: The user liz@example.com would be + user-liz@example.com.' + - The group example@googlegroups.com would be group-example@googlegroups.com. + - To refer to all members of the Google Apps for Business domain example.com, + the entity would be domain-example.com. + returned: success + type: str + entityId: + description: + - The ID for the entity. + returned: success + type: str + id: + description: + - The ID of the access-control entry. + returned: success + type: str + projectTeam: + description: + - The project team associated with the entity. + returned: success + type: complex + contains: + projectNumber: + description: + - The project team associated with the entity. + returned: success + type: str + team: + description: + - The team. + returned: success + type: str + role: + description: + - The access permission for the entity. + returned: success + type: str +cors: + description: + - The bucket's Cross-Origin Resource Sharing (CORS) configuration. + returned: success + type: complex + contains: + maxAgeSeconds: + description: + - The value, in seconds, to return in the Access-Control-Max-Age header used + in preflight responses. + returned: success + type: int + method: + description: + - 'The list of HTTP methods on which to include CORS response headers, (GET, + OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means + "any method".' + returned: success + type: list + origin: + description: + - The list of Origins eligible to receive CORS response headers. + - 'Note: "*" is permitted in the list of origins, and means "any Origin".' 
+ returned: success + type: list + responseHeader: + description: + - The list of HTTP headers other than the simple response headers to give permission + for the user-agent to share across domains. + returned: success + type: list +defaultEventBasedHold: + description: + - Whether or not to automatically apply an eventBasedHold to new objects added to + the bucket. + returned: success + type: bool +defaultObjectAcl: + description: + - Default access controls to apply to new objects when no ACL is provided. + returned: success + type: complex + contains: + bucket: + description: + - The name of the bucket. + returned: success + type: dict + domain: + description: + - The domain associated with the entity. + returned: success + type: str + email: + description: + - The email address associated with the entity. + returned: success + type: str + entity: + description: + - 'The entity holding the permission, in one of the following forms: * user-{{userId}} + * user-{{email}} (such as "user-liz@example.com") * group-{{groupId}} * group-{{email}} + (such as "group-example@googlegroups.com") * domain-{{domain}} (such as "domain-example.com") + * project-team-{{projectId}} * allUsers * allAuthenticatedUsers .' + returned: success + type: str + entityId: + description: + - The ID for the entity. + returned: success + type: str + generation: + description: + - The content generation of the object, if applied to an object. + returned: success + type: int + id: + description: + - The ID of the access-control entry. + returned: success + type: str + object: + description: + - The name of the object, if applied to an object. + returned: success + type: str + projectTeam: + description: + - The project team associated with the entity. + returned: success + type: complex + contains: + projectNumber: + description: + - The project team associated with the entity. + returned: success + type: str + team: + description: + - The team. 
+ returned: success + type: str + role: + description: + - The access permission for the entity. + returned: success + type: str +id: + description: + - The ID of the bucket. For buckets, the id and name properities are the same. + returned: success + type: str +lifecycle: + description: + - The bucket's lifecycle configuration. + - See U(https://developers.google.com/storage/docs/lifecycle) for more information. + returned: success + type: complex + contains: + rule: + description: + - A lifecycle management rule, which is made of an action to take and the condition(s) + under which the action will be taken. + returned: success + type: complex + contains: + action: + description: + - The action to take. + returned: success + type: complex + contains: + storageClass: + description: + - Target storage class. Required iff the type of the action is SetStorageClass. + returned: success + type: str + type: + description: + - Type of the action. Currently, only Delete and SetStorageClass are + supported. + returned: success + type: str + condition: + description: + - The condition(s) under which the action will be taken. + returned: success + type: complex + contains: + ageDays: + description: + - Age of an object (in days). This condition is satisfied when an object + reaches the specified age. + returned: success + type: int + createdBefore: + description: + - A date in RFC 3339 format with only the date part (for instance, "2013-01-15"). + This condition is satisfied when an object is created before midnight + of the specified date in UTC. + returned: success + type: str + customTimeBefore: + description: + - A date in the RFC 3339 format YYYY-MM-DD. This condition is satisfied + when the customTime metadata for the object is set to an earlier date + than the date used in this lifecycle condition. + returned: success + type: str + daysSinceCustomTime: + description: + - Days since the date set in the customTime metadata for the object. 
+ This condition is satisfied when the current date and time is at least + the specified number of days after the customTime. + returned: success + type: int + daysSinceNoncurrentTime: + description: + - Relevant only for versioned objects. This condition is satisfied when + an object has been noncurrent for more than the specified number of + days. + returned: success + type: int + isLive: + description: + - Relevant only for versioned objects. If the value is true, this condition + matches live objects; if the value is false, it matches archived objects. + returned: success + type: bool + matchesStorageClass: + description: + - Objects having any of the storage classes specified by this condition + will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, + COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY. + returned: success + type: list + noncurrentTimeBefore: + description: + - Relevant only for versioned objects. A date in the RFC 3339 format + YYYY-MM-DD. This condition is satisfied for objects that became noncurrent + on a date prior to the one specified in this condition. + returned: success + type: str + numNewerVersions: + description: + - Relevant only for versioned objects. If the value is N, this condition + is satisfied when there are at least N versions (including the live + version) newer than this version of the object. + returned: success + type: int +location: + description: + - The location of the bucket. Object data for objects in the bucket resides in physical + storage within this region. Defaults to US. See the developer's guide for the + authoritative list. + returned: success + type: str +logging: + description: + - The bucket's logging configuration, which defines the destination bucket and optional + name prefix for the current bucket's logs. + returned: success + type: complex + contains: + logBucket: + description: + - The destination bucket where the current bucket's logs should be placed. 
+ returned: success + type: str + logObjectPrefix: + description: + - A prefix for log object names. + returned: success + type: str +metageneration: + description: + - The metadata generation of this bucket. + returned: success + type: int +name: + description: + - The name of the bucket. + returned: success + type: str +owner: + description: + - The owner of the bucket. This is always the project team's owner group. + returned: success + type: complex + contains: + entity: + description: + - The entity, in the form project-owner-projectId. + returned: success + type: str + entityId: + description: + - The ID for the entity. + returned: success + type: str +projectNumber: + description: + - The project number of the project the bucket belongs to. + returned: success + type: str +storageClass: + description: + - The bucket's default storage class, used whenever no storageClass is specified + for a newly-created object. This defines how objects in the bucket are stored + and determines the SLA and the cost of storage. + - Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, + and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket + is created, it will default to STANDARD. For more information, see storage classes. + returned: success + type: str +timeCreated: + description: + - The creation time of the bucket in RFC 3339 format. + returned: success + type: str +updated: + description: + - The modification time of the bucket in RFC 3339 format. + returned: success + type: str +versioning: + description: + - The bucket's versioning configuration. + returned: success + type: complex + contains: + enabled: + description: + - While set to true, versioning is fully enabled for this bucket. + returned: success + type: bool +website: + description: + - The bucket's website configuration, controlling how the service behaves when accessing + bucket contents as a web site. 
See the Static Website Examples for more information. + returned: success + type: complex + contains: + mainPageSuffix: + description: + - If the requested object path is missing, the service will ensure the path + has a trailing '/', append this suffix, and attempt to retrieve the resulting + object. This allows the creation of index.html objects to represent directory + pages. + returned: success + type: str + notFoundPage: + description: + - If the requested object path is missing, and any mainPageSuffix object is + missing, if applicable, the service will return the named object from this + bucket as the content for a 404 Not Found result. + returned: success + type: str +labels: + description: + - Labels applied to this bucket. A list of key->value pairs. + returned: success + type: dict +project: + description: + - A valid API project identifier. + returned: success + type: str +predefinedDefaultObjectAcl: + description: + - Apply a predefined set of default object access controls to this bucket. + - 'Acceptable values are: - "authenticatedRead": Object owner gets OWNER access, + and allAuthenticatedUsers get READER access.' + - '- "bucketOwnerFullControl": Object owner gets OWNER access, and project team + owners get OWNER access.' + - '- "bucketOwnerRead": Object owner gets OWNER access, and project team owners + get READER access.' + - '- "private": Object owner gets OWNER access.' + - '- "projectPrivate": Object owner gets OWNER access, and project team members + get access according to their roles.' + - '- "publicRead": Object owner gets OWNER access, and allUsers get READER access.' 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + acl=dict( + type='list', + elements='dict', + options=dict( + bucket=dict(required=True, type='dict'), + entity=dict(required=True, type='str'), + entity_id=dict(type='str'), + project_team=dict(type='dict', options=dict(project_number=dict(type='str'), team=dict(type='str'))), + role=dict(type='str'), + ), + ), + cors=dict( + type='list', + elements='dict', + options=dict( + max_age_seconds=dict(type='int'), + method=dict(type='list', elements='str'), + origin=dict(type='list', elements='str'), + response_header=dict(type='list', elements='str'), + ), + ), + default_event_based_hold=dict(type='bool'), + default_object_acl=dict( + type='list', + elements='dict', + options=dict( + bucket=dict(required=True, type='dict'), + entity=dict(required=True, type='str'), + object=dict(type='str'), + role=dict(required=True, type='str'), + ), + ), + lifecycle=dict( + type='dict', + options=dict( + rule=dict( + type='list', + elements='dict', + options=dict( + action=dict(type='dict', options=dict(storage_class=dict(type='str'), type=dict(type='str'))), + condition=dict( + type='dict', + options=dict( + age_days=dict(type='int'), + created_before=dict(type='str'), + custom_time_before=dict(type='str'), + 
days_since_custom_time=dict(type='int'), + days_since_noncurrent_time=dict(type='int'), + is_live=dict(type='bool'), + matches_storage_class=dict(type='list', elements='str'), + noncurrent_time_before=dict(type='str'), + num_newer_versions=dict(type='int'), + ), + ), + ), + ) + ), + ), + location=dict(type='str'), + logging=dict(type='dict', options=dict(log_bucket=dict(type='str'), log_object_prefix=dict(type='str'))), + metageneration=dict(type='int'), + name=dict(type='str'), + owner=dict(type='dict', options=dict(entity=dict(type='str'))), + storage_class=dict(type='str'), + versioning=dict(type='dict', options=dict(enabled=dict(type='bool'))), + website=dict(type='dict', options=dict(main_page_suffix=dict(type='str'), not_found_page=dict(type='str'))), + labels=dict(type='dict'), + project=dict(type='str'), + predefined_default_object_acl=dict(type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control'] + + state = module.params['state'] + kind = 'storage#bucket' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.post(link, resource_to_request(module)), kind) + + +def update(module, link, kind): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.put(link, resource_to_request(module)), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 
'storage') + return return_if_object(module, auth.delete(link), kind) + + +def resource_to_request(module): + request = { + u'kind': 'storage#bucket', + u'project': module.params.get('project'), + u'predefinedDefaultObjectAcl': module.params.get('predefined_default_object_acl'), + u'acl': BucketAclArray(module.params.get('acl', []), module).to_request(), + u'cors': BucketCorsArray(module.params.get('cors', []), module).to_request(), + u'defaultEventBasedHold': module.params.get('default_event_based_hold'), + u'defaultObjectAcl': BucketDefaultobjectaclArray(module.params.get('default_object_acl', []), module).to_request(), + u'lifecycle': BucketLifecycle(module.params.get('lifecycle', {}), module).to_request(), + u'location': module.params.get('location'), + u'logging': BucketLogging(module.params.get('logging', {}), module).to_request(), + u'metageneration': module.params.get('metageneration'), + u'name': module.params.get('name'), + u'owner': BucketOwner(module.params.get('owner', {}), module).to_request(), + u'storageClass': module.params.get('storage_class'), + u'versioning': BucketVersioning(module.params.get('versioning', {}), module).to_request(), + u'website': BucketWebsite(module.params.get('website', {}), module).to_request(), + u'labels': module.params.get('labels'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + return "https://storage.googleapis.com/storage/v1/b/{name}?projection=full".format(**module.params) + + +def collection(module): + return "https://storage.googleapis.com/storage/v1/b?project={project}".format(**module.params) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. 
+ if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'acl': BucketAclArray(response.get(u'acl', []), module).from_response(), + u'cors': BucketCorsArray(response.get(u'cors', []), module).from_response(), + u'defaultEventBasedHold': response.get(u'defaultEventBasedHold'), + u'defaultObjectAcl': BucketDefaultobjectaclArray(module.params.get('default_object_acl', []), module).to_request(), + u'id': response.get(u'id'), + u'lifecycle': BucketLifecycle(response.get(u'lifecycle', {}), module).from_response(), + u'location': response.get(u'location'), + u'logging': BucketLogging(response.get(u'logging', {}), module).from_response(), + u'metageneration': response.get(u'metageneration'), + u'name': response.get(u'name'), + u'owner': BucketOwner(response.get(u'owner', {}), module).from_response(), + u'projectNumber': response.get(u'projectNumber'), + u'storageClass': response.get(u'storageClass'), + u'timeCreated': response.get(u'timeCreated'), + u'updated': response.get(u'updated'), + u'versioning': BucketVersioning(response.get(u'versioning', {}), module).from_response(), + u'website': BucketWebsite(response.get(u'website', {}), module).from_response(), + u'labels': response.get(u'labels'), + } + + +class BucketAclArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'bucket': replace_resource_dict(item.get(u'bucket', {}), 'name'), + u'entity': item.get('entity'), + u'entityId': item.get('entity_id'), + u'projectTeam': BucketProjectteam(item.get('project_team', {}), self.module).to_request(), + u'role': item.get('role'), + } + ) + + def 
_response_from_item(self, item): + return remove_nones_from_dict( + { + u'bucket': item.get(u'bucket'), + u'entity': item.get(u'entity'), + u'entityId': item.get(u'entityId'), + u'projectTeam': BucketProjectteam(item.get(u'projectTeam', {}), self.module).from_response(), + u'role': item.get(u'role'), + } + ) + + +class BucketProjectteam(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'projectNumber': self.request.get('project_number'), u'team': self.request.get('team')}) + + def from_response(self): + return remove_nones_from_dict({u'projectNumber': self.request.get(u'projectNumber'), u'team': self.request.get(u'team')}) + + +class BucketCorsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'maxAgeSeconds': item.get('max_age_seconds'), + u'method': item.get('method'), + u'origin': item.get('origin'), + u'responseHeader': item.get('response_header'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'maxAgeSeconds': item.get(u'maxAgeSeconds'), + u'method': item.get(u'method'), + u'origin': item.get(u'origin'), + u'responseHeader': item.get(u'responseHeader'), + } + ) + + +class BucketDefaultobjectaclArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + 
return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict( + { + u'bucket': replace_resource_dict(item.get(u'bucket', {}), 'name'), + u'entity': item.get('entity'), + u'object': item.get('object'), + u'role': item.get('role'), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + {u'bucket': item.get(u'bucket'), u'entity': item.get(u'entity'), u'object': item.get(u'object'), u'role': item.get(u'role')} + ) + + +class BucketProjectteam(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'projectNumber': self.request.get('project_number'), u'team': self.request.get('team')}) + + def from_response(self): + return remove_nones_from_dict({u'projectNumber': self.request.get(u'projectNumber'), u'team': self.request.get(u'team')}) + + +class BucketLifecycle(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'rule': BucketRuleArray(self.request.get('rule', []), self.module).to_request()}) + + def from_response(self): + return remove_nones_from_dict({u'rule': BucketRuleArray(self.request.get(u'rule', []), self.module).from_response()}) + + +class BucketRuleArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return 
remove_nones_from_dict( + { + u'action': BucketAction(item.get('action', {}), self.module).to_request(), + u'condition': BucketCondition(item.get('condition', {}), self.module).to_request(), + } + ) + + def _response_from_item(self, item): + return remove_nones_from_dict( + { + u'action': BucketAction(item.get(u'action', {}), self.module).from_response(), + u'condition': BucketCondition(item.get(u'condition', {}), self.module).from_response(), + } + ) + + +class BucketAction(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'storageClass': self.request.get('storage_class'), u'type': self.request.get('type')}) + + def from_response(self): + return remove_nones_from_dict({u'storageClass': self.request.get(u'storageClass'), u'type': self.request.get(u'type')}) + + +class BucketCondition(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict( + { + u'age': self.request.get('age_days'), + u'createdBefore': self.request.get('created_before'), + u'customTimeBefore': self.request.get('custom_time_before'), + u'daysSinceCustomTime': self.request.get('days_since_custom_time'), + u'daysSinceNoncurrentTime': self.request.get('days_since_noncurrent_time'), + u'isLive': self.request.get('is_live'), + u'matchesStorageClass': self.request.get('matches_storage_class'), + u'noncurrentTimeBefore': self.request.get('noncurrent_time_before'), + u'numNewerVersions': self.request.get('num_newer_versions'), + } + ) + + def from_response(self): + return remove_nones_from_dict( + { + u'age': self.request.get(u'age'), + u'createdBefore': self.request.get(u'createdBefore'), + u'customTimeBefore': self.request.get(u'customTimeBefore'), + u'daysSinceCustomTime': self.request.get(u'daysSinceCustomTime'), + 
u'daysSinceNoncurrentTime': self.request.get(u'daysSinceNoncurrentTime'), + u'isLive': self.request.get(u'isLive'), + u'matchesStorageClass': self.request.get(u'matchesStorageClass'), + u'noncurrentTimeBefore': self.request.get(u'noncurrentTimeBefore'), + u'numNewerVersions': self.request.get(u'numNewerVersions'), + } + ) + + +class BucketLogging(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'logBucket': self.request.get('log_bucket'), u'logObjectPrefix': self.request.get('log_object_prefix')}) + + def from_response(self): + return remove_nones_from_dict({u'logBucket': self.request.get(u'logBucket'), u'logObjectPrefix': self.request.get(u'logObjectPrefix')}) + + +class BucketOwner(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'entity': self.request.get('entity')}) + + def from_response(self): + return remove_nones_from_dict({u'entity': self.request.get(u'entity')}) + + +class BucketVersioning(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'enabled': self.request.get('enabled')}) + + def from_response(self): + return remove_nones_from_dict({u'enabled': self.request.get(u'enabled')}) + + +class BucketWebsite(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'mainPageSuffix': self.request.get('main_page_suffix'), u'notFoundPage': self.request.get('not_found_page')}) + + def from_response(self): + return remove_nones_from_dict({u'mainPageSuffix': 
self.request.get(u'mainPageSuffix'), u'notFoundPage': self.request.get(u'notFoundPage')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_storage_bucket_access_control.py b/ansible_collections/google/cloud/plugins/modules/gcp_storage_bucket_access_control.py new file mode 100644 index 000000000..c6d36a81d --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_storage_bucket_access_control.py @@ -0,0 +1,395 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_storage_bucket_access_control +description: +- The BucketAccessControls resource represents the Access Control Lists (ACLs) for + buckets within Google Cloud Storage. ACLs let you specify who has access to your + data and to what extent. 
+- 'There are three roles that can be assigned to an entity: READERs can get the bucket, + though no acl property will be returned, and list the bucket''s objects. WRITERs + are READERs, and they can insert objects into the bucket and delete the bucket''s + objects. OWNERs are WRITERs, and they can get the acl property of a bucket, update + a bucket, and call all BucketAccessControls methods on the bucket. For more information, + see Access Control, with the caveat that this API uses READER, WRITER, and OWNER + instead of READ, WRITE, and FULL_CONTROL.' +short_description: Creates a GCP BucketAccessControl +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + bucket: + description: + - The name of the bucket. + - 'This field represents a link to a Bucket resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_storage_bucket task and then set this bucket field to "{{ name-of-resource + }}"' + required: true + type: dict + entity: + description: + - 'The entity holding the permission, in one of the following forms: user-userId + user-email group-groupId group-email domain-domain project-team-projectId allUsers + allAuthenticatedUsers Examples: The user liz@example.com would be user-liz@example.com.' + - The group example@googlegroups.com would be group-example@googlegroups.com. + - To refer to all members of the Google Apps for Business domain example.com, + the entity would be domain-example.com. + required: true + type: str + role: + description: + - The access permission for the entity. 
+ - 'Some valid choices include: "OWNER", "READER", "WRITER"' + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- 'API Reference: U(https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls)' +- 'Official Documentation: U(https://cloud.google.com/storage/docs/access-control/lists)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. 
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a bucket + google.cloud.gcp_storage_bucket: + name: "{{ resource_name }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + state: present + register: bucket + +- name: create a bucket access control + google.cloud.gcp_storage_bucket_access_control: + bucket: "{{ bucket }}" + entity: user-alexstephen@google.com + role: WRITER + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +bucket: + description: + - The name of the bucket. + returned: success + type: dict +domain: + description: + - The domain associated with the entity. + returned: success + type: str +email: + description: + - The email address associated with the entity. + returned: success + type: str +entity: + description: + - 'The entity holding the permission, in one of the following forms: user-userId + user-email group-groupId group-email domain-domain project-team-projectId allUsers + allAuthenticatedUsers Examples: The user liz@example.com would be user-liz@example.com.' + - The group example@googlegroups.com would be group-example@googlegroups.com. + - To refer to all members of the Google Apps for Business domain example.com, the + entity would be domain-example.com. + returned: success + type: str +entityId: + description: + - The ID for the entity. + returned: success + type: str +id: + description: + - The ID of the access-control entry. + returned: success + type: str +projectTeam: + description: + - The project team associated with the entity. + returned: success + type: complex + contains: + projectNumber: + description: + - The project team associated with the entity. + returned: success + type: str + team: + description: + - The team. 
+ returned: success + type: str +role: + description: + - The access permission for the entity. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + bucket=dict(required=True, type='dict'), + entity=dict(required=True, type='str'), + role=dict(type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control'] + + state = module.params['state'] + kind = 'storage#bucketAccessControl' + + fetch = fetch_resource(module, self_link(module), kind) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.post(link, resource_to_request(module)), kind) + + +def update(module, link, kind): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.put(link, 
resource_to_request(module)), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.delete(link), kind) + + +def resource_to_request(module): + request = { + u'kind': 'storage#bucketAccessControl', + u'bucket': replace_resource_dict(module.params.get(u'bucket', {}), 'name'), + u'entity': module.params.get('entity'), + u'role': module.params.get('role'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + res = {'bucket': replace_resource_dict(module.params['bucket'], 'name'), 'entity': module.params['entity']} + return "https://storage.googleapis.com/storage/v1/b/{bucket}/acl/{entity}".format(**res) + + +def collection(module): + res = {'bucket': replace_resource_dict(module.params['bucket'], 'name')} + return "https://storage.googleapis.com/storage/v1/b/{bucket}/acl".format(**res) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'bucket': replace_resource_dict(module.params.get(u'bucket', {}), 'name'), + u'domain': response.get(u'domain'), + u'email': response.get(u'email'), + u'entity': module.params.get('entity'), + u'entityId': response.get(u'entityId'), + u'id': response.get(u'id'), + u'projectTeam': BucketAccessControlProjectteam(response.get(u'projectTeam', {}), module).from_response(), + u'role': response.get(u'role'), + } + + +class BucketAccessControlProjectteam(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'projectNumber': self.request.get('project_number'), u'team': self.request.get('team')}) + + def from_response(self): + return remove_nones_from_dict({u'projectNumber': self.request.get(u'projectNumber'), u'team': self.request.get(u'team')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_storage_default_object_acl.py b/ansible_collections/google/cloud/plugins/modules/gcp_storage_default_object_acl.py new file mode 100644 index 000000000..8a3b538bf --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_storage_default_object_acl.py @@ -0,0 +1,413 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** 
Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_storage_default_object_acl +description: +- The DefaultObjectAccessControls resources represent the Access Control Lists (ACLs) + applied to a new object within a Google Cloud Storage bucket when no ACL was provided + for that object. ACLs let you specify who has access to your bucket contents and + to what extent. +- 'There are two roles that can be assigned to an entity: READERs can get an object, + though the acl property will not be revealed.' +- OWNERs are READERs, and they can get the acl property, update an object, and call + all objectAccessControls methods on the object. The owner of an object is always + an OWNER. +- For more information, see Access Control, with the caveat that this API uses READER + and OWNER instead of READ and FULL_CONTROL. +short_description: Creates a GCP DefaultObjectACL +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + bucket: + description: + - The name of the bucket. 
+ - 'This field represents a link to a Bucket resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_storage_bucket task and then set this bucket field to "{{ name-of-resource + }}"' + required: true + type: dict + entity: + description: + - 'The entity holding the permission, in one of the following forms: * user-{{userId}} + * user-{{email}} (such as "user-liz@example.com") * group-{{groupId}} * group-{{email}} + (such as "group-example@googlegroups.com") * domain-{{domain}} (such as "domain-example.com") + * project-team-{{projectId}} * allUsers * allAuthenticatedUsers .' + required: true + type: str + object: + description: + - The name of the object, if applied to an object. + required: false + type: str + role: + description: + - The access permission for the entity. + - 'Some valid choices include: "OWNER", "READER"' + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. 
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- 'API Reference: U(https://cloud.google.com/storage/docs/json_api/v1/defaultObjectAccessControls)'
+- 'Official Documentation: U(https://cloud.google.com/storage/docs/access-control/create-manage-lists)'
+- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variables values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: create a bucket
+  google.cloud.gcp_storage_bucket:
+    name: "{{ resource_name }}"
+    project: "{{ gcp_project }}"
+    auth_kind: "{{ gcp_cred_kind }}"
+    service_account_file: "{{ gcp_cred_file }}"
+    state: present
+  register: bucket
+
+- name: create a default object acl
+  google.cloud.gcp_storage_default_object_acl:
+    bucket: "{{ bucket }}"
+    entity: user-alexstephen@google.com
+    role: OWNER
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+    state: present
+'''
+
+RETURN = '''
+bucket:
+  description:
+  - The name of the bucket.
+  returned: success
+  type: dict
+domain:
+  description:
+  - The domain associated with the entity.
+  returned: success
+  type: str
+email:
+  description:
+  - The email address associated with the entity.
+ returned: success + type: str +entity: + description: + - 'The entity holding the permission, in one of the following forms: * user-{{userId}} + * user-{{email}} (such as "user-liz@example.com") * group-{{groupId}} * group-{{email}} + (such as "group-example@googlegroups.com") * domain-{{domain}} (such as "domain-example.com") + * project-team-{{projectId}} * allUsers * allAuthenticatedUsers .' + returned: success + type: str +entityId: + description: + - The ID for the entity. + returned: success + type: str +generation: + description: + - The content generation of the object, if applied to an object. + returned: success + type: int +id: + description: + - The ID of the access-control entry. + returned: success + type: str +object: + description: + - The name of the object, if applied to an object. + returned: success + type: str +projectTeam: + description: + - The project team associated with the entity. + returned: success + type: complex + contains: + projectNumber: + description: + - The project team associated with the entity. + returned: success + type: str + team: + description: + - The team. + returned: success + type: str +role: + description: + - The access permission for the entity. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + bucket=dict(required=True, type='dict'), + entity=dict(required=True, type='str'), + object=dict(type='str'), + role=dict(required=True, type='str'), + ) + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control'] + + state = module.params['state'] + kind = 'storage#objectAccessControl' + + if module.params['id']: + fetch = fetch_resource(module, self_link(module), kind) + else: + fetch = {} + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), kind) + fetch = fetch_resource(module, self_link(module), kind) + changed = True + else: + delete(module, self_link(module), kind) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, collection(module), kind) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link, kind): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.post(link, resource_to_request(module)), kind) + + +def update(module, link, kind): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.put(link, 
resource_to_request(module)), kind) + + +def delete(module, link, kind): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.delete(link), kind) + + +def resource_to_request(module): + request = { + u'kind': 'storage#objectAccessControl', + u'bucket': replace_resource_dict(module.params.get(u'bucket', {}), 'name'), + u'entity': module.params.get('entity'), + u'object': module.params.get('object'), + u'role': module.params.get('role'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, kind, allow_not_found=True): + auth = GcpSession(module, 'storage') + return return_if_object(module, auth.get(link), kind, allow_not_found) + + +def self_link(module): + res = {'bucket': replace_resource_dict(module.params['bucket'], 'name'), 'entity': module.params['entity']} + return "https://storage.googleapis.com/storage/v1/b/{bucket}/defaultObjectAcl/{entity}".format(**res) + + +def collection(module): + res = {'bucket': replace_resource_dict(module.params['bucket'], 'name')} + return "https://storage.googleapis.com/storage/v1/b/{bucket}/defaultObjectAcl".format(**res) + + +def return_if_object(module, response, kind, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. 
+ response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. +def response_to_hash(module, response): + return { + u'bucket': response.get(u'bucket'), + u'domain': response.get(u'domain'), + u'email': response.get(u'email'), + u'entity': response.get(u'entity'), + u'entityId': response.get(u'entityId'), + u'generation': response.get(u'generation'), + u'id': response.get(u'id'), + u'object': response.get(u'object'), + u'projectTeam': DefaultObjectACLProjectteam(response.get(u'projectTeam', {}), module).from_response(), + u'role': response.get(u'role'), + } + + +class DefaultObjectACLProjectteam(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = {} + + def to_request(self): + return remove_nones_from_dict({u'projectNumber': self.request.get('project_number'), u'team': self.request.get('team')}) + + def from_response(self): + return remove_nones_from_dict({u'projectNumber': self.request.get(u'projectNumber'), u'team': self.request.get(u'team')}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_storage_object.py b/ansible_collections/google/cloud/plugins/modules/gcp_storage_object.py new file mode 100644 index 000000000..6d497bd64 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_storage_object.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + 
+################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + +DOCUMENTATION = """ +--- +module: gcp_storage_object +description: +- Upload or download a file from a GCS bucket. +short_description: Creates a GCP Object +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +- google-cloud-storage >= 1.2.0 +options: + action: + description: + - The actions to be taken on this object. + - You can download the object, upload the object, or delete it. + required: false + type: str + choices: + - download + - upload + - delete + src: + description: + - Source location of file (may be local machine or cloud depending on action). Cloud locations need to be urlencoded including slashes. + required: true + type: path + dest: + description: + - Destination location of file (may be local machine or cloud depending on action). Cloud location need to be urlencoded including slashes. + Required for upload and download. + required: false + type: path + bucket: + description: + - The name of the bucket. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +""" + +EXAMPLES = """ +- name: Download an object + google.cloud.gcp_storage_object: + action: download + bucket: ansible-bucket + src: modules.zip + dest: "~/modules.zip" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +""" + +RETURN = """ +bucket: + description: + - The bucket where the object is contained. + returned: download, upload + type: str +cache_control: + description: + - HTTP 'Cache-Control' header for this object + returned: download, upload + type: str +chunk_size: + description: + - Get the blob's default chunk size + returned: download, upload + type: str +media_link: + description: + - The link for the media + returned: download, upload + type: str +self_link: + description: + - The self_link for the media. + returned: download, upload + type: str +storage_class: + description: + - The storage class for the object. 
+ returned: download, upload + type: str +""" + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + replace_resource_dict, +) +import json +import os +import mimetypes +import hashlib +import base64 + +try: + import google.cloud + from google.cloud import storage + from google.api_core.client_info import ClientInfo + from google.cloud.storage import Blob + + HAS_GOOGLE_STORAGE_LIBRARY = True +except ImportError: + HAS_GOOGLE_STORAGE_LIBRARY = False +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + action=dict(type="str", choices=["download", "upload", "delete"]), + src=dict(type="path"), + dest=dict(type="path"), + bucket=dict(type="str"), + ) + ) + + if module.params["action"] == "upload" and module.params["dest"] is None: + module.fail_json( + msg="`dest` parameter is None: `dest` is required for the upload operation" + ) + + if not HAS_GOOGLE_STORAGE_LIBRARY: + module.fail_json(msg="Please install the google-cloud-storage Python library") + + if not module.params["scopes"]: + module.params["scopes"] = [ + "https://www.googleapis.com/auth/devstorage.full_control" + ] + + creds = GcpSession(module, "storage")._credentials() + client = storage.Client( + project=module.params["project"], + credentials=creds, + client_info=ClientInfo(user_agent="Google-Ansible-MM-object"), + ) + + bucket = client.get_bucket(module.params["bucket"]) + + remote_file_exists = Blob(remote_file_path(module), bucket).exists() + local_file_exists = os.path.isfile(local_file_path(module)) + + # Check if files exist. 
+ results = {} + if module.params["action"] == "delete" and not remote_file_exists: + module.fail_json(msg="File does not exist in bucket") + + if module.params["action"] == "download" and not remote_file_exists: + module.fail_json(msg="File does not exist in bucket") + + if module.params["action"] == "upload" and not local_file_exists: + module.fail_json(msg="File does not exist on disk") + + if module.params["action"] == "delete": + if remote_file_exists: + results = delete_file(module, client, module.params["src"]) + results["changed"] = True + module.params["changed"] = True + + elif module.params["action"] == "download": + results = download_file( + module, client, module.params["src"], module.params["dest"] + ) + results["changed"] = True + + # Upload + else: + results = upload_file( + module, client, module.params["src"], module.params["dest"] + ) + results["changed"] = True + + module.exit_json(**results) + + +def download_file(module, client, name, dest): + try: + bucket = client.get_bucket(module.params["bucket"]) + blob = Blob(name, bucket) + with open(dest, "wb") as file_obj: + blob.download_to_file(file_obj) + return blob_to_dict(blob) + except google.cloud.exceptions.NotFound as e: + module.fail_json(msg=str(e)) + + +def upload_file(module, client, src, dest): + try: + bucket = client.get_bucket(module.params["bucket"]) + blob = Blob(dest, bucket) + with open(src, "rb") as file_obj: + blob.upload_from_file(file_obj) + return blob_to_dict(blob) + except google.cloud.exceptions.GoogleCloudError as e: + module.fail_json(msg=str(e)) + + +def delete_file(module, client, name): + try: + bucket = client.get_bucket(module.params["bucket"]) + blob = Blob(name, bucket) + blob.delete() + return {} + except google.cloud.exceptions.NotFound as e: + module.fail_json(msg=str(e)) + + +def local_file_path(module): + if module.params["action"] == "download": + return module.params["dest"] + else: + return module.params["src"] + + +def remote_file_path(module): + if 
module.params["action"] == "download": + return module.params["src"] + elif module.params["action"] == "delete": + return module.params["src"] + else: + return module.params["dest"] + + +def blob_to_dict(blob): + return { + "bucket": {"name": blob.bucket.path}, + "cache_control": blob.cache_control, + "chunk_size": blob.chunk_size, + "media_link": blob.media_link, + "self_link": blob.self_link, + "storage_class": blob.storage_class, + } + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_tpu_node.py b/ansible_collections/google/cloud/plugins/modules/gcp_tpu_node.py new file mode 100644 index 000000000..8a7e11fb0 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_tpu_node.py @@ -0,0 +1,554 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_tpu_node +description: +- A Cloud TPU instance. 
+short_description: Creates a GCP Node +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + state: + description: + - Whether the given object should exist in GCP + choices: + - present + - absent + default: present + type: str + name: + description: + - The immutable name of the TPU. + required: true + type: str + description: + description: + - The user-supplied description of the TPU. Maximum of 512 characters. + required: false + type: str + accelerator_type: + description: + - The type of hardware accelerators associated with this node. + required: true + type: str + tensorflow_version: + description: + - The version of Tensorflow running in the Node. + required: true + type: str + network: + description: + - The name of a network to peer the TPU node to. It must be a preexisting Compute + Engine network inside of the project on which this API has been activated. If + none is provided, "default" will be used. + required: false + type: str + cidr_block: + description: + - The CIDR block that the TPU node will use when selecting an IP address. This + CIDR block must be a /29 block; the Compute Engine networks API forbids a smaller + block, and using a larger block would be wasteful (a node can only consume one + IP address). + - Errors will occur if the CIDR block has already been used for a currently existing + TPU node, the CIDR block conflicts with any subnetworks in the user's provided + network, or the provided network is peered with another network that is using + that CIDR block. + required: false + type: str + use_service_networking: + description: + - Whether the VPC peering for the node is set up through Service Networking API. + - The VPC Peering should be set up before provisioning the node. If this field + is set, cidr_block field should not be specified. 
If the network that you want
+      to peer the TPU Node to is a Shared VPC network, the node must be created with
+      this field enabled.
+    required: false
+    default: 'false'
+    type: bool
+  scheduling_config:
+    description:
+    - Sets the scheduling options for this TPU instance.
+    required: false
+    type: dict
+    suboptions:
+      preemptible:
+        description:
+        - Defines whether the TPU instance is preemptible.
+        required: true
+        type: bool
+  labels:
+    description:
+    - Resource labels to represent user provided metadata.
+    required: false
+    type: dict
+  zone:
+    description:
+    - The GCP location for the TPU. If it is not provided, the provider zone is used.
+    required: false
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used
+    type: list
+    elements: str
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+ type: str +notes: +- 'API Reference: U(https://cloud.google.com/tpu/docs/reference/rest/v1/projects.locations.nodes)' +- 'Official Documentation: U(https://cloud.google.com/tpu/docs/)' +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: create a node + google.cloud.gcp_tpu_node: + name: test_object + zone: us-central1-b + accelerator_type: v3-8 + tensorflow_version: '1.11' + cidr_block: 10.2.0.0/29 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" + state: present +''' + +RETURN = ''' +name: + description: + - The immutable name of the TPU. + returned: success + type: str +description: + description: + - The user-supplied description of the TPU. Maximum of 512 characters. + returned: success + type: str +acceleratorType: + description: + - The type of hardware accelerators associated with this node. + returned: success + type: str +tensorflowVersion: + description: + - The version of Tensorflow running in the Node. + returned: success + type: str +network: + description: + - The name of a network to peer the TPU node to. It must be a preexisting Compute + Engine network inside of the project on which this API has been activated. If + none is provided, "default" will be used. 
+ returned: success + type: str +cidrBlock: + description: + - The CIDR block that the TPU node will use when selecting an IP address. This CIDR + block must be a /29 block; the Compute Engine networks API forbids a smaller block, + and using a larger block would be wasteful (a node can only consume one IP address). + - Errors will occur if the CIDR block has already been used for a currently existing + TPU node, the CIDR block conflicts with any subnetworks in the user's provided + network, or the provided network is peered with another network that is using + that CIDR block. + returned: success + type: str +serviceAccount: + description: + - The service account used to run the Tensorflow services within the node. To share + resources, including Google Cloud Storage data, with the Tensorflow job running + in the Node, this account must have permissions to that data. + returned: success + type: str +useServiceNetworking: + description: + - Whether the VPC peering for the node is set up through Service Networking API. + - The VPC Peering should be set up before provisioning the node. If this field is + set, cidr_block field should not be specified. If the network that you want to + peer the TPU Node to is a Shared VPC network, the node must be created with this + field enabled. + returned: success + type: bool +schedulingConfig: + description: + - Sets the scheduling options for this TPU instance. + returned: success + type: complex + contains: + preemptible: + description: + - Defines whether the TPU instance is preemptible. + returned: success + type: bool +networkEndpoints: + description: + - The network endpoints where TPU workers can be accessed and sent work. + - It is recommended that Tensorflow clients of the node first reach out to the first + (index 0) entry. + returned: success + type: complex + contains: + ipAddress: + description: + - The IP address of this network endpoint.
+ returned: success + type: str + port: + description: + - The port of this network endpoint. + returned: success + type: int +labels: + description: + - Resource labels to represent user provided metadata. + returned: success + type: dict +zone: + description: + - The GCP location for the TPU. If it is not provided, the provider zone is used. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ + +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import ( + navigate_hash, + GcpSession, + GcpModule, + GcpRequest, + remove_nones_from_dict, + replace_resource_dict, +) +import json +import time + +################################################################################ +# Main +################################################################################ + + +def main(): + """Main function""" + + module = GcpModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + description=dict(type='str'), + accelerator_type=dict(required=True, type='str'), + tensorflow_version=dict(required=True, type='str'), + network=dict(type='str'), + cidr_block=dict(type='str'), + use_service_networking=dict(type='bool'), + scheduling_config=dict(type='dict', options=dict(preemptible=dict(required=True, type='bool'))), + labels=dict(type='dict'), + zone=dict(type='str'), + ), + mutually_exclusive=[['cidr_block', 'use_service_networking']], + ) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + state = module.params['state'] + + fetch = fetch_resource(module, self_link(module)) + changed = False + + if fetch: + if state == 'present': + if is_different(module, fetch): + update(module, self_link(module), fetch) + fetch = fetch_resource(module, 
self_link(module)) + changed = True + else: + delete(module, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + fetch = create(module, create_link(module)) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(module, link): + auth = GcpSession(module, 'tpu') + return wait_for_operation(module, auth.post(link, resource_to_request(module))) + + +def update(module, link, fetch): + update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) + return fetch_resource(module, self_link(module)) + + +def update_fields(module, request, response): + if response.get('tensorflowVersion') != request.get('tensorflowVersion'): + tensorflow_version_update(module, request, response) + + +def tensorflow_version_update(module, request, response): + auth = GcpSession(module, 'tpu') + auth.post( + ''.join(["https://tpu.googleapis.com/v1/", "projects/{project}/locations/{zone}/nodes/{name}:reimage"]).format(**module.params), + {u'tensorflowVersion': module.params.get('tensorflow_version')}, + ) + + +def delete(module, link): + auth = GcpSession(module, 'tpu') + return wait_for_operation(module, auth.delete(link)) + + +def resource_to_request(module): + request = { + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'acceleratorType': module.params.get('accelerator_type'), + u'tensorflowVersion': module.params.get('tensorflow_version'), + u'network': module.params.get('network'), + u'cidrBlock': module.params.get('cidr_block'), + u'useServiceNetworking': module.params.get('use_service_networking'), + u'schedulingConfig': NodeSchedulingconfig(module.params.get('scheduling_config', {}), module).to_request(), + u'labels': module.params.get('labels'), + } + return_vals = {} + for k, v in request.items(): + if v or v is False: + return_vals[k] = v + + return return_vals + + +def fetch_resource(module, link, 
allow_not_found=True): + auth = GcpSession(module, 'tpu') + return return_if_object(module, auth.get(link), allow_not_found) + + +def self_link(module): + return "https://tpu.googleapis.com/v1/projects/{project}/locations/{zone}/nodes/{name}".format(**module.params) + + +def collection(module): + return "https://tpu.googleapis.com/v1/projects/{project}/locations/{zone}/nodes".format(**module.params) + + +def create_link(module): + return "https://tpu.googleapis.com/v1/projects/{project}/locations/{zone}/nodes?nodeId={name}".format(**module.params) + + +def return_if_object(module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError): + module.fail_json(msg="Invalid JSON response with error: %s" % response.text) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +def is_different(module, response): + request = resource_to_request(module) + response = response_to_hash(module, response) + + # Remove all output-only from response. + response_vals = {} + for k, v in response.items(): + if k in request: + response_vals[k] = v + + request_vals = {} + for k, v in request.items(): + if k in response: + request_vals[k] = v + + return GcpRequest(request_vals) != GcpRequest(response_vals) + + +# Remove unnecessary properties from the response. +# This is for doing comparisons with Ansible's current parameters. 
+def response_to_hash(module, response): + return { + u'name': module.params.get('name'), + u'description': module.params.get('description'), + u'acceleratorType': module.params.get('accelerator_type'), + u'tensorflowVersion': response.get(u'tensorflowVersion'), + u'network': module.params.get('network'), + u'cidrBlock': module.params.get('cidr_block'), + u'serviceAccount': response.get(u'serviceAccount'), + u'useServiceNetworking': module.params.get('use_service_networking'), + u'schedulingConfig': NodeSchedulingconfig(module.params.get('scheduling_config', {}), module).to_request(), + u'networkEndpoints': NodeNetworkendpointsArray(response.get(u'networkEndpoints', []), module).from_response(), + u'labels': module.params.get('labels'), + } + + +def async_op_url(module, extra_data=None): + if extra_data is None: + extra_data = {} + url = "https://tpu.googleapis.com/v1/{op_id}" + combined = extra_data.copy() + combined.update(module.params) + return url.format(**combined) + + +def wait_for_operation(module, response): + op_result = return_if_object(module, response) + if op_result is None: + return {} + status = navigate_hash(op_result, ['done']) + wait_done = wait_for_completion(status, op_result, module) + raise_if_errors(wait_done, ['error'], module) + return navigate_hash(wait_done, ['response']) + + +def wait_for_completion(status, op_result, module): + op_id = navigate_hash(op_result, ['name']) + op_uri = async_op_url(module, {'op_id': op_id}) + while not status: + raise_if_errors(op_result, ['error'], module) + time.sleep(1.0) + op_result = fetch_resource(module, op_uri, False) + status = navigate_hash(op_result, ['done']) + return op_result + + +def raise_if_errors(response, err_path, module): + errors = navigate_hash(response, err_path) + if errors is not None: + module.fail_json(msg=errors) + + +class NodeSchedulingconfig(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = 
{} + + def to_request(self): + return remove_nones_from_dict({u'preemptible': self.request.get('preemptible')}) + + def from_response(self): + return remove_nones_from_dict({u'preemptible': self.request.get(u'preemptible')}) + + +class NodeNetworkendpointsArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return remove_nones_from_dict({}) + + def _response_from_item(self, item): + return remove_nones_from_dict({}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/google/cloud/plugins/modules/gcp_tpu_node_info.py b/ansible_collections/google/cloud/plugins/modules/gcp_tpu_node_info.py new file mode 100644 index 000000000..cd27a67a0 --- /dev/null +++ b/ansible_collections/google/cloud/plugins/modules/gcp_tpu_node_info.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_tpu_node_info +description: +- Gather info for GCP Node +short_description: Gather info for GCP Node +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + zone: + description: + - The GCP location for the TPU. If it is not provided, the provider zone is used. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + elements: str + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. 
+ - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a node + gcp_tpu_node_info: + zone: us-central1-b + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The immutable name of the TPU. + returned: success + type: str + description: + description: + - The user-supplied description of the TPU. Maximum of 512 characters. + returned: success + type: str + acceleratorType: + description: + - The type of hardware accelerators associated with this node. + returned: success + type: str + tensorflowVersion: + description: + - The version of Tensorflow running in the Node. + returned: success + type: str + network: + description: + - The name of a network to peer the TPU node to. It must be a preexisting Compute + Engine network inside of the project on which this API has been activated. + If none is provided, "default" will be used. + returned: success + type: str + cidrBlock: + description: + - The CIDR block that the TPU node will use when selecting an IP address. 
This + CIDR block must be a /29 block; the Compute Engine networks API forbids a + smaller block, and using a larger block would be wasteful (a node can only + consume one IP address). + - Errors will occur if the CIDR block has already been used for a currently + existing TPU node, the CIDR block conflicts with any subnetworks in the user's + provided network, or the provided network is peered with another network that + is using that CIDR block. + returned: success + type: str + serviceAccount: + description: + - The service account used to run the Tensorflow services within the node. + To share resources, including Google Cloud Storage data, with the Tensorflow + job running in the Node, this account must have permissions to that data. + returned: success + type: str + useServiceNetworking: + description: + - Whether the VPC peering for the node is set up through Service Networking + API. + - The VPC Peering should be set up before provisioning the node. If this field + is set, cidr_block field should not be specified. If the network that you + want to peer the TPU Node to is a Shared VPC network, the node must be created + with this field enabled. + returned: success + type: bool + schedulingConfig: + description: + - Sets the scheduling options for this TPU instance. + returned: success + type: complex + contains: + preemptible: + description: + - Defines whether the TPU instance is preemptible. + returned: success + type: bool + networkEndpoints: + description: + - The network endpoints where TPU workers can be accessed and sent work. + - It is recommended that Tensorflow clients of the node first reach out to the + first (index 0) entry. + returned: success + type: complex + contains: + ipAddress: + description: + - The IP address of this network endpoint. + returned: success + type: str + port: + description: + - The port of this network endpoint.
+ returned: success + type: int + labels: + description: + - Resource labels to represent user provided metadata. + returned: success + type: dict + zone: + description: + - The GCP location for the TPU. If it is not provided, the provider zone is + used. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(zone=dict(type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://tpu.googleapis.com/v1/projects/{project}/locations/{zone}/nodes".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'tpu') + return auth.list(link, return_if_object, array_name='nodes') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() -- cgit v1.2.3